From 901b041196f006cd1fc4775a87849e6e716b6c62 Mon Sep 17 00:00:00 2001
From: wanghaoshuang
Date: Wed, 11 Oct 2017 23:09:45 +0800
Subject: [PATCH] Add seq_expand op

1. Add unit test
2. Add SeqExpandOpKernel
---
 paddle/operators/seq_expand_op.cc                  | 125 ++++++++++++++++++
 paddle/operators/seq_expand_op.cu                  |  23 ++++
 paddle/operators/seq_expand_op.h                   |  83 ++++++++++++
 .../v2/framework/tests/test_seq_expand.py          |  61 +++++++++
 4 files changed, 292 insertions(+)
 create mode 100644 paddle/operators/seq_expand_op.cc
 create mode 100644 paddle/operators/seq_expand_op.cu
 create mode 100644 paddle/operators/seq_expand_op.h
 create mode 100644 python/paddle/v2/framework/tests/test_seq_expand.py

diff --git a/paddle/operators/seq_expand_op.cc b/paddle/operators/seq_expand_op.cc
new file mode 100644
index 00000000000..894ba3f6b70
--- /dev/null
+++ b/paddle/operators/seq_expand_op.cc
@@ -0,0 +1,125 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#include "paddle/operators/seq_expand_op.h"

namespace paddle {
namespace operators {

using framework::Tensor;

class SeqExpandOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of SeqExpandOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of SeqExpandOp should not be null.");
    int repeat = ctx->Attrs().Get<int>("repeat");
    framework::DDim out_dim;
    if (repeat == 0) {
      PADDLE_ENFORCE(
          ctx->HasInput("Y"),
          "Input(Y) of SeqExpandOp should not be null while repeat == 0.");
      out_dim = ctx->GetInputDim("Y");
      ctx->ShareLoD("Y", "Out");
    } else {
      out_dim = ctx->GetInputDim("X");
      out_dim[0] = out_dim[0] * repeat;
    }
    ctx->SetOutputDim("Out", out_dim);
  }
};

class SeqExpandOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  SeqExpandOpMaker(framework::OpProto* proto,
                   framework::OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    // TODO(wanghaoshuang): Add more comments
    AddInput("X", "The input('X') of seq_expand op.");
    AddInput("Y", "The reference input('Y') of seq_expand op.");
    AddOutput("Out", "The output of seq_expand op.");
    AddAttr<int>("repeat", "repeat times").SetDefault(0);
    AddComment(R"DOC(
As an example, when repeat > 0 every row of X is repeated 'repeat' times:

Given:

X.data = [1, 2, 3]

and

repeat = 2

then we get:

Out.data = [1, 1, 2, 2, 3, 3]
Out.lod = [[0, 2, 4, 6]]
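
When repeat == 0, the LoD of the reference input Y determines the expansion
instead; segment i of Out (rows Y.lod[0][i] to Y.lod[0][i + 1]) is filled
with copies of row i of X:

Given:

X.data = [a, b, c]

and

Y.lod = [[0, 2, 3, 6]]

then we get:

Out.data = [a, a, b, c, c, c]
Out.lod = [[0, 2, 3, 6]]
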
)DOC");
  }
};

class SeqExpandOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@GRAD) should not be null");
    auto x_dims = ctx->GetInputDim("X");
    auto x_grad_name = framework::GradVarName("X");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->SetOutputDim(x_grad_name, x_dims);
    }
  }
};

class SeqExpandOpGradMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;

 protected:
  std::unique_ptr<framework::OpDescBind> Apply() const override {
    auto* bind = new framework::OpDescBind();
    bind->SetInput("X", Input("X"));
    bind->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
    bind->SetOutput(framework::GradVarName("X"), InputGrad("X"));
    bind->SetAttrMap(Attrs());
    bind->SetType("seq_expand_grad");
    return std::unique_ptr<framework::OpDescBind>(bind);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(seq_expand, ops::SeqExpandOp, ops::SeqExpandOpMaker,
                  ops::SeqExpandOpGradMaker);
REGISTER_OPERATOR(seq_expand_grad, ops::SeqExpandOpGrad);
REGISTER_OP_CPU_KERNEL(
    seq_expand, ops::SeqExpandKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
    seq_expand_grad,
    ops::SeqExpandGradKernel<paddle::platform::CPUPlace, float>);

diff --git a/paddle/operators/seq_expand_op.cu b/paddle/operators/seq_expand_op.cu
new file mode 100644
index 00000000000..f1e4b82a76e
--- /dev/null
+++ b/paddle/operators/seq_expand_op.cu
@@ -0,0 +1,23 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#define EIGEN_USE_GPU
#include "paddle/operators/seq_expand_op.h"

namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
    seq_expand, ops::SeqExpandKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
    seq_expand_grad,
    ops::SeqExpandGradKernel<paddle::platform::GPUPlace, float>);

diff --git a/paddle/operators/seq_expand_op.h b/paddle/operators/seq_expand_op.h
new file mode 100644
index 00000000000..80076dc35fe
--- /dev/null
+++ b/paddle/operators/seq_expand_op.h
@@ -0,0 +1,83 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#pragma once

#include <cstring>

#include "hl_cuda.h"
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

using LoDTensor = framework::LoDTensor;
using LoD = framework::LoD;

template <typename Place, typename T>
class SeqExpandKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<LoDTensor>("X");
    auto* out = context.Output<LoDTensor>("Out");
    const T* x_data = x->data<T>();
    T* out_data = out->mutable_data<T>(context.GetPlace());
    size_t repeat = static_cast<size_t>(context.Attr<int>("repeat"));

    if (repeat != 0) {
      if (x->lod().size() == 0) {
        // Build a level-0 LoD for the output: row i of X becomes rows
        // [i * repeat, (i + 1) * repeat) of Out.
        std::vector<size_t> level0;
        for (size_t i = 0; i <= static_cast<size_t>(x->dims()[0]); ++i) {
          level0.push_back(i * repeat);
        }
        LoD out_lod;
        out_lod.push_back(level0);
        out->set_lod(out_lod);
      }
    }
    auto out_dim = out->dims();
    size_t element_len = framework::product(out_dim) / out_dim[0];
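    // cpy_map[i] is the row of X that row i of Out is copied from. It is
    // read off the level-0 LoD of Out: all rows in segment
    // [lod[0][i], lod[0][i + 1]) come from row i of X. For example,
    // lod = [[0, 2, 3, 6]] gives cpy_map = [0, 0, 1, 2, 2, 2].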
    std::vector<size_t> cpy_map(out_dim[0]);
    if (x->lod().size() == 0) {
      auto lod = out->lod();
      for (size_t i = 0; i < lod[0].size() - 1; ++i) {
        for (size_t j = lod[0][i]; j < lod[0][i + 1]; ++j) {
          cpy_map[j] = i;
        }
      }
    }
    if (platform::is_cpu_place(context.GetPlace())) {
      for (int i = 0; i < out_dim[0]; ++i) {
        memcpy(out_data + element_len * i, x_data + element_len * cpy_map[i],
               sizeof(T) * element_len);
      }
    } else {
      for (int i = 0; i < out_dim[0]; ++i) {
        hl_memcpy(out_data + element_len * i,
                  const_cast<T*>(x_data) + element_len * cpy_map[i],
                  sizeof(T) * element_len);
      }
    }
  }
};

template <typename Place, typename T>
class SeqExpandGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    // TODO(wanghaoshuang): Implement the backward kernel.
    // auto* d_out = context.Input<LoDTensor>(framework::GradVarName("Out"));
    // auto* d_x = context.Output<LoDTensor>(framework::GradVarName("X"));
    // d_x->mutable_data<T>(context.GetPlace());
  }
};

}  // namespace operators
}  // namespace paddle

diff --git a/python/paddle/v2/framework/tests/test_seq_expand.py b/python/paddle/v2/framework/tests/test_seq_expand.py
new file mode 100644
index 00000000000..4608d3c3bd6
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_seq_expand.py
@@ -0,0 +1,61 @@
import unittest
import numpy as np
from op_test import OpTest


class TestSeqExpand(OpTest):
    def set_data(self):
        self.op_type = 'seq_expand'
        x = np.random.uniform(0.1, 1, [3, 2, 2]).astype('float32')
        y = np.zeros((6, 2, 2)).astype('float32')
        lod = [[0, 2, 3, 6]]
        self.inputs = {'X': x, 'Y': (y, lod)}
        self.repeat = None

    def compute(self):
        x = self.inputs['X']
        cpy_map = {}
        lod = []
        if self.repeat:
            # repeat mode: each row of x is repeated self.repeat times.
            level0 = [i * self.repeat for i in range(x.shape[0] + 1)]
            lod.append(level0)
            out_shape = list(x.shape)
            out_shape[0] = out_shape[0] * self.repeat
        else:
            # reference mode: the output follows the LoD of input Y.
            y, lod = self.inputs['Y']
            out_shape = y.shape
        out = np.zeros(out_shape).astype('float32')

        # Row j of the output is a copy of row cpy_map[j] of x.
        for i in range(len(lod[0]) - 1):
            for j in range(lod[0][i], lod[0][i + 1]):
                cpy_map[j] = i
        for i in range(len(out)):
            out[i] = x[cpy_map[i]]

        self.outputs = {'Out': (out, lod)}

    def setUp(self):
        self.set_data()
        self.compute()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")


if __name__ == '__main__':
    unittest.main()
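
For reference, the expansion rule implemented by the kernel and checked by the
test above can be sketched standalone in NumPy. This is a minimal illustration
of the two modes under the semantics described in the op's DOC comment;
seq_expand_ref is a hypothetical helper written for this sketch, not an API of
this patch:

    import numpy as np

    def seq_expand_ref(x, repeat=0, y_lod=None):
        if repeat:
            # repeat mode: row i of x fills rows [i*repeat, (i+1)*repeat).
            lod0 = [i * repeat for i in range(x.shape[0] + 1)]
        else:
            # reference mode: segment i of the output, i.e. rows
            # [lod0[i], lod0[i+1]), is filled with copies of row i of x.
            lod0 = y_lod[0]
        out = np.zeros((lod0[-1],) + x.shape[1:], dtype=x.dtype)
        for i in range(len(lod0) - 1):
            out[lod0[i]:lod0[i + 1]] = x[i]
        return out, [lod0]

    x = np.array([[1], [2], [3]], dtype='float32')
    out, lod = seq_expand_ref(x, repeat=2)
    # out.flatten() -> [1, 1, 2, 2, 3, 3], lod -> [[0, 2, 4, 6]]
    out, lod = seq_expand_ref(x, y_lod=[[0, 2, 3, 6]])
    # out.flatten() -> [1, 1, 2, 3, 3, 3], lod -> [[0, 2, 3, 6]]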