From df2eee71d8ea29eac4182633f4c295826190cced Mon Sep 17 00:00:00 2001
From: Hongyu Liu <43953930+phlrain@users.noreply.github.com>
Date: Tue, 25 Jun 2019 15:38:59 +0800
Subject: [PATCH] Sequence mask support tensor (#18249)

* sequence mask supports max length tensor input; test=develop
* add rnn_impl.py; test=develop
* add basic gru lstm unittest; test=develop
* fix api spec; test=develop
* fix sequence_mask op bug; test=develop test=document_preview
* change +-*x to elementwise_op; test=develop
* add mkl flag; test=develop
* fix rnn impl bug; test=develop
* update api spec; test=develop
* fix doc bug; test=develop
* fix lstm bugs; test=develop
---
 paddle/fluid/API.spec                         |  32 +
 paddle/fluid/operators/math/math_function.cu  |   4 +-
 .../sequence_ops/sequence_mask_op.cc          |  74 ++
 .../operators/sequence_ops/sequence_mask_op.h |  66 +-
 paddle/fluid/operators/shape_op.cc            |   3 +-
 paddle/fluid/operators/shape_op.cu            |   1 +
 paddle/fluid/operators/slice_op.cc            |   2 +-
 paddle/fluid/operators/transpose_op.cc        |   5 +
 paddle/fluid/operators/transpose_op.cu.cc     |   4 +
 .../paddle/fluid/contrib/layers/__init__.py   |   2 +
 .../paddle/fluid/contrib/layers/rnn_impl.py   | 743 ++++++++++++++++++
 python/paddle/fluid/layers/control_flow.py    |   2 +-
 python/paddle/fluid/layers/nn.py              |  18 +-
 .../fluid/tests/unittests/CMakeLists.txt      |   8 +
 .../tests/unittests/test_basic_gru_api.py     | 334 ++++++++
 .../tests/unittests/test_basic_gru_unit_op.py | 144 ++++
 .../tests/unittests/test_basic_lstm_api.py    | 305 +++++++
 .../unittests/test_basic_lstm_unit_op.py      | 133 ++++
 .../tests/unittests/test_sequence_mask.py     |  62 ++
 19 files changed, 1886 insertions(+), 56 deletions(-)
 create mode 100644 python/paddle/fluid/contrib/layers/rnn_impl.py
 create mode 100644 python/paddle/fluid/tests/unittests/test_basic_gru_api.py
 create mode 100644 python/paddle/fluid/tests/unittests/test_basic_gru_unit_op.py
 create mode 100644 python/paddle/fluid/tests/unittests/test_basic_lstm_api.py
 create mode 100644 python/paddle/fluid/tests/unittests/test_basic_lstm_unit_op.py

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index cfa2e0b718..24536e2242 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -430,6 +430,38 @@ paddle.fluid.contrib.multi_upload (ArgSpec(args=['client', 'hdfs_path', 'local_p
 paddle.fluid.contrib.extend_with_decoupled_weight_decay (ArgSpec(args=['base_optimizer'], varargs=None, keywords=None, defaults=None), ('document', 'a1095dfd4ec725747f662d69cd7659d4'))
 paddle.fluid.contrib.mixed_precision.decorate (ArgSpec(args=['optimizer', 'init_loss_scaling', 'incr_every_n_steps', 'decr_every_n_nan_or_inf', 'incr_ratio', 'decr_ratio', 'use_dynamic_loss_scaling'], varargs=None, keywords=None, defaults=(1.0, 1000, 2, 2.0, 0.8, False)), ('document', 'bdb8f9dbb0d94b3957272c53eeee9818'))
 paddle.fluid.contrib.fused_elemwise_activation (ArgSpec(args=['x', 'y', 'functor_list', 'axis', 'scale', 'save_intermediate_out'], varargs=None, keywords=None, defaults=(-1, 0.0, True)), ('document', '1c4b247a2858cea8d9d8750693688270'))
+paddle.fluid.contrib.BasicGRUUnit.__init__ (ArgSpec(args=['self', 'name_scope', 'hidden_size', 'param_attr', 'bias_attr', 'gate_activation', 'activation', 'dtype'], varargs=None, keywords=None, defaults=(None, None, None, None, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
+paddle.fluid.contrib.BasicGRUUnit.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
+paddle.fluid.contrib.BasicGRUUnit.backward (ArgSpec(args=['self'], varargs='inputs', keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.clear_gradients (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.create_parameter (ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'a6420ca1455366eaaf972191612de0b6'))
+paddle.fluid.contrib.BasicGRUUnit.create_variable (ArgSpec(args=['self', 'name', 'persistable', 'dtype', 'type'], varargs=None, keywords=None, defaults=(None, None, None, VarType.LOD_TENSOR)), ('document', '171cccfceba636d5bbf7bbae672945d8'))
+paddle.fluid.contrib.BasicGRUUnit.eval (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.forward (ArgSpec(args=['self', 'input', 'pre_hidden'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.full_name (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '23ce4f961f48ed0f79cadf93a3938ed2'))
+paddle.fluid.contrib.BasicGRUUnit.load_dict (ArgSpec(args=['self', 'stat_dict', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.parameters (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '5aec25a854eb57abc798dccccbb507d5'))
+paddle.fluid.contrib.BasicGRUUnit.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
+paddle.fluid.contrib.BasicGRUUnit.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.basic_gru (ArgSpec(args=['input', 'init_hidden', 'hidden_size', 'num_layers', 'sequence_length', 'dropout_prob', 'bidirectional', 'batch_first', 'param_attr', 'bias_attr', 'gate_activation', 'activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(1, None, 0.0, False, True, None, None, None, None, 'float32', 'basic_gru')), ('document', '0afcbe4fbe1b8c35eda58b4efe48f9fd'))
+paddle.fluid.contrib.BasicLSTMUnit.__init__ (ArgSpec(args=['self', 'name_scope', 'hidden_size', 'param_attr', 'bias_attr', 'gate_activation', 'activation', 'forget_bias', 'dtype'], varargs=None, keywords=None, defaults=(None, None, None, None, 1.0, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
+paddle.fluid.contrib.BasicLSTMUnit.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
+paddle.fluid.contrib.BasicLSTMUnit.backward (ArgSpec(args=['self'], varargs='inputs', keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.clear_gradients (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.create_parameter (ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'a6420ca1455366eaaf972191612de0b6'))
+paddle.fluid.contrib.BasicLSTMUnit.create_variable (ArgSpec(args=['self', 'name', 'persistable', 'dtype', 'type'], varargs=None, keywords=None, defaults=(None, None, None, VarType.LOD_TENSOR)), ('document', '171cccfceba636d5bbf7bbae672945d8'))
+paddle.fluid.contrib.BasicLSTMUnit.eval (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.forward (ArgSpec(args=['self', 'input', 'pre_hidden', 'pre_cell'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.full_name (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '23ce4f961f48ed0f79cadf93a3938ed2'))
+paddle.fluid.contrib.BasicLSTMUnit.load_dict (ArgSpec(args=['self', 'stat_dict', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.parameters (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '5aec25a854eb57abc798dccccbb507d5'))
+paddle.fluid.contrib.BasicLSTMUnit.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
+paddle.fluid.contrib.BasicLSTMUnit.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.basic_lstm (ArgSpec(args=['input', 'init_hidden', 'init_cell', 'hidden_size', 'num_layers', 'sequence_length', 'dropout_prob', 'bidirectional', 'batch_first', 'param_attr', 'bias_attr', 'gate_activation', 'activation', 'forget_bias', 'dtype', 'name'], varargs=None, keywords=None, defaults=(1, None, 0.0, False, True, None, None, None, None, 1.0, 'float32', 'basic_lstm')), ('document', 'fe4d0c3c55a162b8cfe10b05fabb7ce4'))
 paddle.fluid.dygraph.Layer.__init__ (ArgSpec(args=['self', 'name_scope', 'dtype'], varargs=None, keywords=None, defaults=(VarType.FP32,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.dygraph.Layer.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
 paddle.fluid.dygraph.Layer.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
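The contrib entries above expose two new building blocks, basic_gru and basic_lstm, plus the BasicGRUUnit/BasicLSTMUnit layers they are built from. Below is a minimal static-graph sketch of calling them as declared in the ArgSpecs; the shapes are hypothetical, and the returned tuples are assumptions based on the rnn_impl.py added by this patch:

    import paddle.fluid as fluid
    from paddle.fluid.contrib.layers import basic_gru, basic_lstm

    hidden_size = 128

    # batch_first defaults to True per the ArgSpecs above, so the input is
    # laid out [batch, seq_len, input_dim]; -1 marks run-time dimensions.
    x = fluid.layers.data(name='x', shape=[-1, -1, 64], dtype='float32',
                          append_batch_size=False)
    seq_len = fluid.layers.data(name='seq_len', shape=[-1], dtype='int64',
                                append_batch_size=False)

    # init_hidden/init_cell of None let the implementation start from zeros.
    gru_out, gru_last_hidden = basic_gru(
        x, None, hidden_size, num_layers=2, sequence_length=seq_len)

    lstm_out, lstm_last_hidden, lstm_last_cell = basic_lstm(
        x, None, None, hidden_size, num_layers=2, sequence_length=seq_len)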
diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu
index 4645b3ae6e..fdbd77a5c8 100644
--- a/paddle/fluid/operators/math/math_function.cu
+++ b/paddle/fluid/operators/math/math_function.cu
@@ -35,7 +35,9 @@ template struct SetConstant<platform::CUDADeviceContext, platform::float16>;
   template struct Transpose<platform::CUDADeviceContext, float, RANK>;   \
   template struct Transpose<platform::CUDADeviceContext, double, RANK>;  \
   template struct Transpose<platform::CUDADeviceContext, float16, RANK>; \
-  template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>;
+  template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>;  \
+  template struct Transpose<platform::CUDADeviceContext, int32_t, RANK>; \
+  template struct Transpose<platform::CUDADeviceContext, int64_t, RANK>;
 
 DEFINE_GPU_TRANS(1);
 DEFINE_GPU_TRANS(2);
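The two added Transpose instantiations extend the GPU transpose functor to integer element types; the template arguments shown are a reconstruction consistent with the transpose2 kernel registrations later in this patch. Assuming that reconstruction, a transpose over an integer tensor can now run on CUDA, e.g.:

    import paddle.fluid as fluid

    # An int64 tensor (e.g. token ids) laid out [seq_len, batch].
    ids = fluid.layers.data(name='ids', shape=[-1, -1], dtype='int64',
                            append_batch_size=False)
    # Swapping to [batch, seq_len] dispatches to the new integer
    # Transpose instantiations when run on a GPU place.
    ids_t = fluid.layers.transpose(ids, perm=[1, 0])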
diff --git a/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc b/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc
index 7fc506aab4..33d24c11f9 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc
@@ -13,6 +13,80 @@
 // limitations under the License.
 
 #include "paddle/fluid/operators/sequence_ops/sequence_mask_op.h"
+#include <string>
+
+namespace paddle {
+namespace operators {
+
+class SequenceMaskOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must exist");
+    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) must exist");
+
+    int maxlen = ctx->Attrs().Get<int>("maxlen");
+    auto dim = framework::vectorize2int(ctx->GetInputDim("X"));
+
+    if (ctx->HasInputs("MaxLenTensor")) {
+      dim.push_back(-1);
+    } else {
+      dim.push_back(maxlen > 0 ? maxlen : -1);
+    }
+    ctx->SetOutputDim("Y", framework::make_ddim(dim));
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        ctx.Input<framework::LoDTensor>("X")->type(), ctx.device_context());
+  }
+  framework::OpKernelType GetKernelTypeForVar(
+      const std::string& var_name, const Tensor& tensor,
+      const framework::OpKernelType& expected_kernel_type) const override {
+    if (var_name == "MaxLenTensor") {
+      return expected_kernel_type;
+    }
+    return framework::OpKernelType(expected_kernel_type.data_type_,
+                                   tensor.place(), tensor.layout());
+  }
+};
+
+class SequenceMaskOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X", "The input tensor of sequence_mask op.");
+    AddOutput("Y", "The output mask of sequence_mask op.");
+    AddInput("MaxLenTensor",
+             "Max length tensor; it has a higher priority than the maxlen "
+             "attribute.")
+        .AsDispensable();
+    AddAttr<int>("maxlen",
+                 "The maximum length of the sequence. If maxlen < 0, maxlen "
+                 "= max(Input(X)).")
+        .SetDefault(-1)
+        .AddCustomChecker([](const int& v) {
+          PADDLE_ENFORCE(v < 0 || v >= 1,
+                         "Attr(maxlen) must either be negative or no less "
+                         "than 1");
+        });
+    AddAttr<int>("out_dtype", "Output data type");
+    AddComment(R"DOC(
+SequenceMask Operator
+
+This operator outputs a Mask according to Input(X) and Attr(maxlen).
+Supposing Input(X) is a Tensor with shape [d_1, d_2, ..., d_n], the
+Output(Y) is a mask with shape [d_1, d_2, ..., d_n, maxlen], where:
+
+Y(i_1, i_2, ..., i_n, j) = (j < X(i_1, i_2, ..., i_n))
+
+If maxlen < 0, maxlen = max(X)
+    )DOC");
+  }
+};
+}  // namespace operators
+}  // namespace paddle
 
 REGISTER_OPERATOR(sequence_mask, paddle::operators::SequenceMaskOp,
                   paddle::operators::SequenceMaskOpMaker,
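The new MaxLenTensor input makes the mask width a run-time value rather than a compile-time attribute. A sketch of the intended Python usage, assuming the layers.sequence_mask wrapper in nn.py (modified by this patch) forwards a Variable maxlen to MaxLenTensor:

    import paddle.fluid as fluid

    lengths = fluid.layers.data(name='lengths', shape=[-1], dtype='int64',
                                append_batch_size=False)
    # maxlen as a 1-element tensor computed at run time, not an int attr.
    max_len = fluid.layers.reduce_max(lengths)
    mask = fluid.layers.sequence_mask(lengths, maxlen=max_len, dtype='float32')

    # Per the DOC block above, lengths [2, 1, 3] with maxlen 4 yield
    #   [[1, 1, 0, 0],
    #    [1, 0, 0, 0],
    #    [1, 1, 1, 0]]
    # Because maxlen is unknown at compile time, InferShape marks the last
    # dimension of Y as -1; the kernel resizes Y after reading MaxLenTensor.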
diff --git a/paddle/fluid/operators/sequence_ops/sequence_mask_op.h b/paddle/fluid/operators/sequence_ops/sequence_mask_op.h
index 57d6f4b3ea..a92c5de632 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_mask_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_mask_op.h
@@ -28,48 +28,8 @@
 namespace paddle {
 namespace operators {
 
-class SequenceMaskOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must exist");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) must exist");
-
-    int maxlen = ctx->Attrs().Get<int>("maxlen");
-    auto dim = framework::vectorize2int(ctx->GetInputDim("X"));
-    dim.push_back(maxlen > 0 ? maxlen : -1);
-    ctx->SetOutputDim("Y", framework::make_ddim(dim));
-  }
-};
-
-class SequenceMaskOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X", "The input tensor of sequence_mask op.");
-    AddOutput("Y", "The output mask of sequence_mask op.");
-    AddAttr<int>("maxlen",
-                 "The maximum length of the sequence. If maxlen < 0, maxlen "
-                 "= max(Input(X)).")
-        .SetDefault(-1)
-        .AddCustomChecker([](const int &v) {
-          PADDLE_ENFORCE(v < 0 || v >= 1,
-                         "Attr(maxlen) must be less than 0 or larger than 1");
-        });
-    AddAttr<int>("out_dtype", "Output data type");
-    AddComment(R"DOC(
-SequenceMask Operator
-
-This operator outputs a Mask according to Input(X) and Attr(maxlen).
-Supposing Input(X) is a Tensor with shape [d_1, d_2, ..., d_n], the
-Output(Y) is a mask with shape [d_1, d_2, ..., d_n, maxlen], where:
-
-Y(i_1, i_2, ..., i_n, j) = (j < X(i_1, i_2, ..., i_n))
-
-If maxlen < 0, maxlen = max(X)
-    )DOC");
-  }
-};
+using LoDTensor = framework::LoDTensor;
+using Tensor = framework::Tensor;
 
 template <typename Tx, typename MaskFunctor>
 struct SequenceMaskForRangeFunctor {
@@ -90,8 +50,6 @@ struct SequenceMaskForRangeFunctor {
 
 template <typename DeviceContext, typename Tx>
 struct SequenceMaskFunctor {
-  using Tensor = framework::LoDTensor;
-
   SequenceMaskFunctor(const DeviceContext &ctx, const Tx *x, Tensor *y,
                       int limits, int maxlen)
       : ctx_(ctx), x_(x), y_(y), limits_(limits), maxlen_(maxlen) {}
@@ -119,7 +77,25 @@ class SequenceMaskKernel : public framework::OpKernel<Tx> {
   void Compute(const framework::ExecutionContext &ctx) const override {
     auto *x = ctx.Input<Tensor>("X");
     auto *y = ctx.Output<Tensor>("Y");
-    auto maxlen = ctx.Attr<int>("maxlen");
+    int maxlen = ctx.Attr<int>("maxlen");
+    if (ctx.HasInput("MaxLenTensor")) {
+      auto max_len_tensor = ctx.Input<Tensor>("MaxLenTensor");
+      PADDLE_ENFORCE(max_len_tensor != nullptr, "MaxLenTensor is NULL");
+      if (platform::is_gpu_place(max_len_tensor->place())) {
+        framework::Tensor temp;
+        TensorCopySync(*max_len_tensor, platform::CPUPlace(), &temp);
+        maxlen = *temp.data<int32_t>();
+      } else {
+        maxlen = *max_len_tensor->data<int32_t>();
+      }
+
+      auto y_dim = framework::vectorize2int(x->dims());
+      y_dim.push_back(maxlen);
+      y->Resize(framework::make_ddim(y_dim));
+
+      PADDLE_ENFORCE_GT(maxlen, 0,
+                        "MaxLenTensor value should be greater than 0");
+    }
 
     auto *x_data = x->data<Tx>();
     auto x_numel = x->numel();
diff --git a/paddle/fluid/operators/shape_op.cc b/paddle/fluid/operators/shape_op.cc
index efc497fa47..3715dd114d 100644
--- a/paddle/fluid/operators/shape_op.cc
+++ b/paddle/fluid/operators/shape_op.cc
@@ -55,4 +55,5 @@ namespace ops = paddle::operators;
 REGISTER_OPERATOR(shape, ops::ShapeOp, ops::ShapeOpMaker,
                   paddle::framework::EmptyGradOpMaker);
 REGISTER_OP_CPU_KERNEL(shape, ops::ShapeKernel<int>, ops::ShapeKernel<int32_t>,
-                       ops::ShapeKernel<float>, ops::ShapeKernel<double>);
+                       ops::ShapeKernel<float>, ops::ShapeKernel<double>,
+                       ops::ShapeKernel<int64_t>);
diff --git a/paddle/fluid/operators/shape_op.cu b/paddle/fluid/operators/shape_op.cu
index d8fa9515ab..2df4ad1339 100644
--- a/paddle/fluid/operators/shape_op.cu
+++ b/paddle/fluid/operators/shape_op.cu
@@ -16,5 +16,6 @@ limitations under the License. */
 
 REGISTER_OP_CUDA_KERNEL(shape, paddle::operators::ShapeKernel<int>,
                         paddle::operators::ShapeKernel<int32_t>,
+                        paddle::operators::ShapeKernel<int64_t>,
                         paddle::operators::ShapeKernel<float>,
                         paddle::operators::ShapeKernel<double>);
diff --git a/paddle/fluid/operators/slice_op.cc b/paddle/fluid/operators/slice_op.cc
index 6788b2e212..6eb6183447 100644
--- a/paddle/fluid/operators/slice_op.cc
+++ b/paddle/fluid/operators/slice_op.cc
@@ -92,7 +92,7 @@ class SliceOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
     return framework::OpKernelType(ctx.Input<Tensor>("Input")->type(),
-                                   ctx.GetPlace());
+                                   ctx.Input<Tensor>("Input")->place());
   }
 };
 
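The widened shape and slice coverage serves the same run-time-shape plumbing: layers.shape yields an int32 tensor, and slicing one element out of it produces a 1-element tensor that can act as the mask width. A sketch with illustrative variable names:

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[-1, -1, 64], dtype='float32',
                          append_batch_size=False)
    lengths = fluid.layers.data(name='lengths', shape=[-1], dtype='int64',
                                append_batch_size=False)

    # layers.shape returns the runtime shape of x as an int32 tensor;
    # slicing out dimension 1 keeps the sequence length as a 1-element tensor.
    x_shape = fluid.layers.shape(x)
    max_len = fluid.layers.slice(x_shape, axes=[0], starts=[1], ends=[2])
    mask = fluid.layers.sequence_mask(lengths, maxlen=max_len, dtype='float32')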
diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc
index db14d350c7..47840d71a3 100644
--- a/paddle/fluid/operators/transpose_op.cc
+++ b/paddle/fluid/operators/transpose_op.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/transpose_op.h"
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -289,8 +290,12 @@ REGISTER_OPERATOR(transpose2_grad, ops::Transpose2OpGrad);
 REGISTER_OP_CPU_KERNEL(
     transpose2,
     ops::TransposeKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::TransposeKernel<paddle::platform::CPUDeviceContext, int32_t>,
+    ops::TransposeKernel<paddle::platform::CPUDeviceContext, int64_t>,
     ops::TransposeKernel<paddle::platform::CPUDeviceContext, double>);
 REGISTER_OP_CPU_KERNEL(
     transpose2_grad,
+    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, int32_t>,
+    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, int64_t>,
     ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
     ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, double>);
diff --git a/paddle/fluid/operators/transpose_op.cu.cc b/paddle/fluid/operators/transpose_op.cu.cc
index 915774e5f3..debf9bce55 100644
--- a/paddle/fluid/operators/transpose_op.cu.cc
+++ b/paddle/fluid/operators/transpose_op.cu.cc
@@ -30,11 +30,15 @@ REGISTER_OP_CUDA_KERNEL(
 REGISTER_OP_CUDA_KERNEL(
     transpose2,
+    ops::TransposeKernel<paddle::platform::CUDADeviceContext, int32_t>,
+    ops::TransposeKernel<paddle::platform::CUDADeviceContext, int64_t>,
     ops::TransposeKernel<paddle::platform::CUDADeviceContext, float>,
    ops::TransposeKernel<paddle::platform::CUDADeviceContext, double>,
     ops::TransposeKernel<paddle::platform::CUDADeviceContext,
                          paddle::platform::float16>);
 REGISTER_OP_CUDA_KERNEL(
     transpose2_grad,
+    ops::TransposeGradKernel<paddle::platform::CUDADeviceContext, int32_t>,
+    ops::TransposeGradKernel<paddle::platform::CUDADeviceContext, int64_t>,
     ops::TransposeGradKernel<paddle::platform::CUDADeviceContext, float>,
     ops::TransposeGradKernel<paddle::platform::CUDADeviceContext, double>,
     ops::TransposeGradKernel<paddle::platform::CUDADeviceContext,
                              paddle::platform::float16>);
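Taken together, these changes let the mask width be fed at run time. A minimal end-to-end sketch, under the same assumption that the nn.py wrapper accepts a Variable maxlen; the feed names and values are illustrative:

    import numpy as np
    import paddle.fluid as fluid

    lengths = fluid.layers.data(name='lengths', shape=[-1], dtype='int64',
                                append_batch_size=False)
    max_len = fluid.layers.data(name='max_len', shape=[1], dtype='int32',
                                append_batch_size=False)
    mask = fluid.layers.sequence_mask(lengths, maxlen=max_len, dtype='float32')

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    out, = exe.run(
        feed={'lengths': np.array([2, 1, 3], dtype='int64'),
              'max_len': np.array([4], dtype='int32')},
        fetch_list=[mask])
    print(out)  # rows: [1 1 0 0], [1 0 0 0], [1 1 1 0]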