From fe70c69fd5d883d2af6b5eefdc109cb4f993935f Mon Sep 17 00:00:00 2001
From: yangyaming
Date: Wed, 9 May 2018 14:12:46 +0000
Subject: [PATCH] Add forward and backward.

---
 paddle/fluid/operators/sequence_pad_op.cc | 131 ++++++++++++++++++++++
 paddle/fluid/operators/sequence_pad_op.cu |  23 ++++
 paddle/fluid/operators/sequence_pad_op.h  |  97 ++++++++++++++++
 3 files changed, 251 insertions(+)
 create mode 100644 paddle/fluid/operators/sequence_pad_op.cc
 create mode 100644 paddle/fluid/operators/sequence_pad_op.cu
 create mode 100644 paddle/fluid/operators/sequence_pad_op.h

diff --git a/paddle/fluid/operators/sequence_pad_op.cc b/paddle/fluid/operators/sequence_pad_op.cc
new file mode 100644
index 00000000000..183d38fcc9a
--- /dev/null
+++ b/paddle/fluid/operators/sequence_pad_op.cc
@@ -0,0 +1,131 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/sequence_pad_op.h"
+
+namespace paddle {
+namespace operators {
+
+class SequencePadOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "Input(X) of SequencePadOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of SequencePadOp should not be null.");
+
+    auto x_dims = ctx->GetInputDim("X");
+
+    PADDLE_ENFORCE_EQ(x_dims.size(), 2,
+                      "Only 2-D tensors are supported; the rank of Input(X) "
+                      "should be 2.");
+
+    auto out_dims = x_dims;
+
+    if (ctx->IsRuntime()) {
+      // At runtime the real LoD is available, so the padded first
+      // dimension can be computed exactly.
+      framework::Variable* x_var =
+          boost::get<framework::Variable*>(ctx->GetInputVarPtrs("X")[0]);
+
+      auto& x_lod = x_var->Get<framework::LoDTensor>().lod();
+
+      PADDLE_ENFORCE_GE(x_lod.size(), 1,
+                        "Input(X) should be a sequence containing lod.");
+
+      auto last_level_lod = x_lod[x_lod.size() - 1];
+      size_t max_len = 0;
+
+      for (size_t i = 1; i < last_level_lod.size(); ++i) {
+        auto seq_len = last_level_lod[i] - last_level_lod[i - 1];
+        max_len = max_len < seq_len ? seq_len : max_len;
+      }
+
+      out_dims[0] = max_len * (last_level_lod.size() - 1);
+    } else {
+      // At compile time only the LoD level is known, so the first
+      // dimension stays unknown (-1).
+      framework::VarDesc* x_desc =
+          boost::get<framework::VarDesc*>(ctx->GetInputVarPtrs("X")[0]);
+      PADDLE_ENFORCE_GE(x_desc->GetLoDLevel(), 1,
+                        "Input(X) should be a sequence containing lod.");
+      out_dims[0] = -1;
+    }
+
+    ctx->SetOutputDim("Out", out_dims);
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
+        ctx.device_context());
+  }
+};
+
+class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  SequencePadOpMaker(OpProto* proto, OpAttrChecker* op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X",
+             "(LoDTensor, default LoDTensor<float>) Input variable which "
+             "should contain lod information. The length of each sequence is "
+             "computed from the bottom-most lod level.");
+    AddOutput("Out",
+              "(Tensor) Output variable, a common tensor without lod. Each "
+              "sequence is padded to the maximum length.");
+    AddAttr<float>("pad_value",
+                   "(float, default 0.0) Value to be padded "
+                   "to the end of each sequence.")
+        .SetDefault(0.0f);
+    AddComment(R"DOC(
+Sequence Pad Operator.
+
+This operator pads every sequence in Input(X) to the length of the longest
+sequence, filling the padded positions with the value of the `pad_value`
+attribute. The padded sequences are packed into Output(Out), a common tensor
+without lod whose first dimension equals
+max_sequence_length * sequence_number.
+
+)DOC");
+  }
+};
+
+class SequencePadGradOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "Input(X) of SequencePadGradOp should not be null.");
+    PADDLE_ENFORCE(
+        ctx->HasInput(framework::GradVarName("Out")),
+        "Input(Out@GRAD) of SequencePadGradOp should not be null.");
+
+    if (ctx->HasOutput(framework::GradVarName("X"))) {
+      ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
+      ctx->ShareLoD("X", /*->*/ framework::GradVarName("X"));
+    }
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OPERATOR(sequence_pad, ops::SequencePadOp, ops::SequencePadOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(sequence_pad_grad, ops::SequencePadGradOp);
+REGISTER_OP_CPU_KERNEL(
+    sequence_pad,
+    ops::SequencePadOpKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::SequencePadOpKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::SequencePadOpKernel<paddle::platform::CPUDeviceContext, int>,
+    ops::SequencePadOpKernel<paddle::platform::CPUDeviceContext, int64_t>);
+REGISTER_OP_CPU_KERNEL(
+    sequence_pad_grad,
+    ops::SequencePadGradOpKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::SequencePadGradOpKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::SequencePadGradOpKernel<paddle::platform::CPUDeviceContext, int>,
+    ops::SequencePadGradOpKernel<paddle::platform::CPUDeviceContext, int64_t>);
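The runtime branch of InferShape above derives Out's first dimension purely from the last-level LoD offsets. As a quick illustration of that arithmetic, here is a minimal standalone sketch in plain C++ (made-up offsets, no Paddle dependencies): three sequences of lengths 3, 1, and 4 pad out to a 12-row tensor.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical last-level LoD offsets; sequence i occupies rows
  // [offsets[i - 1], offsets[i]) of the flattened input.
  std::vector<size_t> offsets = {0, 3, 4, 8};

  size_t seq_num = offsets.size() - 1;
  size_t max_len = 0;
  for (size_t i = 1; i < offsets.size(); ++i) {
    max_len = std::max(max_len, offsets[i] - offsets[i - 1]);
  }

  // Same formula as SequencePadOp::InferShape: out_dims[0] = max_len * seq_num.
  std::cout << "max_len = " << max_len
            << ", out_dims[0] = " << max_len * seq_num
            << std::endl;  // max_len = 4, out_dims[0] = 12
  return 0;
}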
diff --git a/paddle/fluid/operators/sequence_pad_op.cu b/paddle/fluid/operators/sequence_pad_op.cu
new file mode 100644
index 00000000000..a2fa62957ea
--- /dev/null
+++ b/paddle/fluid/operators/sequence_pad_op.cu
@@ -0,0 +1,23 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/sequence_pad_op.h"
+
+namespace ops = paddle::operators;
+REGISTER_OP_CUDA_KERNEL(
+    sequence_pad,
+    ops::SequencePadOpKernel<paddle::platform::CUDADeviceContext, float>);
+REGISTER_OP_CUDA_KERNEL(
+    sequence_pad_grad,
+    ops::SequencePadGradOpKernel<paddle::platform::CUDADeviceContext, float>);
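The CUDA translation unit above only re-registers the kernels for the GPU device context; the computation itself lives in the device-templated classes of sequence_pad_op.h below, which both REGISTER_OP_CPU_KERNEL and REGISTER_OP_CUDA_KERNEL instantiate. A toy sketch of this one-template-per-device pattern (stand-in types, not the framework's actual machinery):

#include <iostream>

// Stand-ins for the framework's device-context types.
struct CPUDeviceContext {};
struct CUDADeviceContext {};

// A single kernel template shared by all devices, as in sequence_pad_op.h.
template <typename DeviceContext, typename T>
struct PadKernelSketch {
  void Compute() const { std::cout << "computing...\n"; }
};

int main() {
  // Registration boils down to instantiating the template once per
  // device/type pair, which is what the REGISTER_OP_*_KERNEL macros arrange.
  PadKernelSketch<CPUDeviceContext, float>{}.Compute();
  PadKernelSketch<CUDADeviceContext, float>{}.Compute();
  return 0;
}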
diff --git a/paddle/fluid/operators/sequence_pad_op.h b/paddle/fluid/operators/sequence_pad_op.h
new file mode 100644
index 00000000000..b36465d8e71
--- /dev/null
+++ b/paddle/fluid/operators/sequence_pad_op.h
@@ -0,0 +1,97 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/memory/memcpy.h"
+#include "paddle/fluid/operators/math/math_function.h"
+
+namespace paddle {
+namespace operators {
+
+using LoDTensor = framework::LoDTensor;
+using LoD = framework::LoD;
+
+// TODO: clean up this code.
+template <typename DeviceContext, typename T>
+class SequencePadOpKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* x_ptr = ctx.Input<LoDTensor>("X");
+    auto* out_ptr = ctx.Output<LoDTensor>("Out");
+
+    out_ptr->mutable_data<T>(ctx.GetPlace());
+
+    // Initialize the whole output with pad_value; the valid time steps
+    // are overwritten by the copies below.
+    T pad_value = static_cast<T>(ctx.Attr<float>("pad_value"));
+    math::SetConstant<DeviceContext, T> set_func;
+    set_func(ctx.template device_context<DeviceContext>(), out_ptr, pad_value);
+
+    auto& x_lod = x_ptr->lod();
+    auto& x_last_level_lod = x_lod[x_lod.size() - 1];
+    auto seq_num = x_last_level_lod.size() - 1;
+    auto max_len = out_ptr->dims()[0] / seq_num;
+
+    PADDLE_ENFORCE_EQ(max_len * seq_num, out_ptr->dims()[0],
+                      "First dimension of `Out` should be equal to "
+                      "maximum length multiplied by sequence number.");
+
+    // Copy sequence i into rows [i * max_len, i * max_len + len_i) of Out.
+    for (size_t i = 1; i < x_last_level_lod.size(); ++i) {
+      auto x_start = x_last_level_lod[i - 1];
+      auto x_end = x_last_level_lod[i];
+      auto out_start = (i - 1) * max_len;
+      auto out_end = out_start + (x_end - x_start);
+      auto x_sub_tensor = x_ptr->Slice(x_start, x_end);
+      auto out_sub_tensor = out_ptr->Slice(out_start, out_end);
+      framework::TensorCopy(x_sub_tensor, ctx.GetPlace(), &out_sub_tensor);
+    }
+  }
+};
+
+template <typename DeviceContext, typename T>
+class SequencePadGradOpKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* x_ptr = ctx.Input<LoDTensor>("X");
+    auto* g_out_ptr = ctx.Input<LoDTensor>(framework::GradVarName("Out"));
+    auto* g_x_ptr = ctx.Output<LoDTensor>(framework::GradVarName("X"));
+
+    g_x_ptr->mutable_data<T>(ctx.GetPlace());
+
+    // Gradients of padded positions are discarded, so start from zero.
+    math::SetConstant<DeviceContext, T> set_func;
+    set_func(ctx.template device_context<DeviceContext>(), g_x_ptr,
+             static_cast<T>(0));
+
+    auto& x_lod = x_ptr->lod();
+    auto& x_last_level_lod = x_lod[x_lod.size() - 1];
+    auto seq_num = x_last_level_lod.size() - 1;
+    int64_t max_len = g_out_ptr->dims()[0] / seq_num;
+
+    PADDLE_ENFORCE_EQ(max_len * seq_num, g_out_ptr->dims()[0],
+                      "First dimension of `Out@GRAD` should be equal to "
+                      "maximum length multiplied by sequence number.");
+
+    // Copy the gradient of each sequence's valid time steps from Out@GRAD
+    // back into X@GRAD.
+    for (size_t i = 1; i < x_last_level_lod.size(); ++i) {
+      auto x_start = x_last_level_lod[i - 1];
+      auto x_end = x_last_level_lod[i];
+      auto out_start = (i - 1) * max_len;
+      auto out_end = out_start + (x_end - x_start);
+
+      auto g_out_sub = g_out_ptr->Slice(out_start, out_end);
+      auto g_x_sub = g_x_ptr->Slice(x_start, x_end);
+      framework::TensorCopy(g_out_sub, ctx.GetPlace(), &g_x_sub);
+    }
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
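As a sanity check on the slice/copy index arithmetic shared by the two kernels above, here is a dependency-free sketch with made-up data (two sequences of lengths 3 and 1, one feature per time step). The forward loop mirrors SetConstant followed by the per-sequence TensorCopy; the backward loop copies the valid positions of Out@GRAD back into X@GRAD and drops the gradient of the padding.

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // Two sequences flattened back to back: {1, 2, 3} and {4}.
  std::vector<float> x = {1, 2, 3, 4};
  std::vector<size_t> offsets = {0, 3, 4};  // last-level LoD offsets
  size_t seq_num = offsets.size() - 1;
  size_t max_len = 3;  // length of the longest sequence
  float pad_value = 0.f;

  // Forward: fill with pad_value, then copy each sequence into its slot.
  std::vector<float> out(max_len * seq_num, pad_value);
  for (size_t i = 1; i <= seq_num; ++i) {
    size_t x_start = offsets[i - 1], x_end = offsets[i];
    size_t out_start = (i - 1) * max_len;
    for (size_t j = x_start; j < x_end; ++j) {
      out[out_start + (j - x_start)] = x[j];
    }
  }
  // out is now {1, 2, 3, 4, 0, 0}.

  // Backward: scatter the valid rows of d(Out) back into d(X); the
  // padded rows contribute nothing.
  std::vector<float> g_out(out.size(), 1.f);  // pretend upstream gradient
  std::vector<float> g_x(x.size(), 0.f);
  for (size_t i = 1; i <= seq_num; ++i) {
    size_t x_start = offsets[i - 1], x_end = offsets[i];
    size_t out_start = (i - 1) * max_len;
    for (size_t j = x_start; j < x_end; ++j) {
      g_x[j] = g_out[out_start + (j - x_start)];
    }
  }

  for (float v : out) std::cout << v << ' ';  // 1 2 3 4 0 0
  std::cout << '\n';
  for (float v : g_x) std::cout << v << ' ';  // 1 1 1 1
  std::cout << '\n';
  return 0;
}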