diff --git a/src/operators/elementwise_sub_op.cpp b/src/operators/elementwise_sub_op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e5ec33ced29f02a524350ed907ef69f2a5dbfca8
--- /dev/null
+++ b/src/operators/elementwise_sub_op.cpp
@@ -0,0 +1,41 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef ELEMENTWISESUB_OP
+
+#include "operators/elementwise_sub_op.h"
+
+namespace paddle_mobile {
+namespace operators {
+
+template <typename Dtype, typename T>
+void ElementwiseSubOp<Dtype, T>::InferShape() const {
+  auto x_dim = this->param_.InputX()->dims();
+  this->param_.Out()->Resize(x_dim);
+}
+
+}  // namespace operators
+}  // namespace paddle_mobile
+
+namespace ops = paddle_mobile::operators;
+#ifdef PADDLE_MOBILE_CPU
+REGISTER_OPERATOR_CPU(elementwise_sub, ops::ElementwiseSubOp);
+#endif
+#ifdef PADDLE_MOBILE_MALI_GPU
+REGISTER_OPERATOR_MALI_GPU(elementwise_sub, ops::ElementwiseSubOp);
+#endif
+#ifdef PADDLE_MOBILE_FPGA
+#endif
+
+#endif
diff --git a/src/operators/elementwise_sub_op.h b/src/operators/elementwise_sub_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..2edd2581a9d3929a29459df60f514132796a53e2
--- /dev/null
+++ b/src/operators/elementwise_sub_op.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef ELEMENTWISESUB_OP
+
+#pragma once
+
+#include <string>
+#include "framework/operator.h"
+#include "kernel/elementwise_sub_kernel.h"
+#include "operators/op_param.h"
+
+namespace paddle_mobile {
+namespace operators {
+using std::string;
+
+template <typename DeviceType, typename T>
+class ElementwiseSubOp : public framework::OperatorWithKernel<
+                             DeviceType, ElementwiseSubParam<DeviceType>,
+                             operators::ElementwiseSubKernel<DeviceType, T>> {
+ public:
+  ElementwiseSubOp(const string &type, const VariableNameMap &inputs,
+                   const VariableNameMap &outputs,
+                   const framework::AttributeMap &attrs,
+                   std::shared_ptr<framework::Scope> scope)
+      : framework::OperatorWithKernel<
+            DeviceType, ElementwiseSubParam<DeviceType>,
+            operators::ElementwiseSubKernel<DeviceType, T>>(
+            type, inputs, outputs, attrs, scope) {}
+
+  using framework::OperatorWithKernel<
+      DeviceType, ElementwiseSubParam<DeviceType>,
+      operators::ElementwiseSubKernel<DeviceType, T>>::OperatorWithKernel;
+  void InferShape() const override;
+
+ protected:
+};
+}  // namespace operators
+}  // namespace paddle_mobile
+
+#endif
diff --git a/src/operators/kernel/arm/elementwise_sub_kernel.cpp b/src/operators/kernel/arm/elementwise_sub_kernel.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d78b3e31098ef7ef929a0d2c00043fab7193b01c
--- /dev/null
+++ b/src/operators/kernel/arm/elementwise_sub_kernel.cpp
@@ -0,0 +1,38 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef ELEMENTWISESUB_OP
+
+#include "operators/kernel/elementwise_sub_kernel.h"
+#include "operators/kernel/central-arm-func/elementwise_sub_arm_func.h"
+
+namespace paddle_mobile {
+namespace operators {
+
+template <>
+bool ElementwiseSubKernel<CPU, float>::Init(ElementwiseSubParam<CPU> *param) {
+  return true;
+}
+
+template <>
+void ElementwiseSubKernel<CPU, float>::Compute(
+    const ElementwiseSubParam<CPU> &param) const {
+  ElementwiseSubCompute<float>(param);
+  param.Out()->set_lod(param.InputX()->lod());
+}
+
+}  // namespace operators
+}  // namespace paddle_mobile
+
+#endif
diff --git a/src/operators/kernel/central-arm-func/elementwise_sub_arm_func.h b/src/operators/kernel/central-arm-func/elementwise_sub_arm_func.h
new file mode 100644
index 0000000000000000000000000000000000000000..663c65c83a0f5b76e292925ea8cb0994b0f99ad1
--- /dev/null
+++ b/src/operators/kernel/central-arm-func/elementwise_sub_arm_func.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef ELEMENTWISESUB_OP
+
+#pragma once
+#include "operators/math/elementwise_op_function.h"
+#include "operators/op_param.h"
+
+namespace paddle_mobile {
+namespace operators {
+
+template <typename T>
+struct SubFunctor {
+  inline T operator()(T a, T b) const { return a - b; }
+};
+
+template <typename P>
+void ElementwiseSubCompute(const ElementwiseSubParam<CPU> &param) {
+  const Tensor *input_x = param.InputX();
+  const Tensor *input_y = param.InputY();
+  Tensor *Out = param.Out();
+  Out->mutable_data<float>();
+  int axis = param.Axis();
+  ElementwiseComputeEx<SubFunctor<float>, float>(input_x, input_y, axis,
+                                                 SubFunctor<float>(), Out);
+}
+
+template class ElementwiseSubKernel<CPU, float>;
+
+}  // namespace operators
+}  // namespace paddle_mobile
+
+#endif
diff --git a/src/operators/kernel/elementwise_sub_kernel.h b/src/operators/kernel/elementwise_sub_kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..9516dcbd3de09debe233571eb5f60b3b8b19a2fa
--- /dev/null
+++ b/src/operators/kernel/elementwise_sub_kernel.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef ELEMENTWISESUB_OP
+
+#pragma once
+
+#include "framework/operator.h"
+#include "operators/math/elementwise_op_function.h"
+#include "operators/op_param.h"
+
+namespace paddle_mobile {
+namespace operators {
+
+template <typename DeviceType, typename T>
+class ElementwiseSubKernel
+    : public framework::OpKernelBase<DeviceType,
+                                     ElementwiseSubParam<DeviceType>> {
+ public:
+  void Compute(const ElementwiseSubParam<DeviceType> &param) const;
+  bool Init(ElementwiseSubParam<DeviceType> *param);
+};
+
+}  // namespace operators
+}  // namespace paddle_mobile
+
+#endif
diff --git a/src/operators/op_param.h b/src/operators/op_param.h
index 27ab4629f011ba25390961b2679fd8f86d213fc3..f4b2696b4ea2753a900b83bd1530d6ba6ea9075f 100644
--- a/src/operators/op_param.h
+++ b/src/operators/op_param.h
@@ -488,6 +488,38 @@
 template <typename DeviceType>
 using ElementwiseAddReluParam = ElementwiseAddParam<DeviceType>;
 #endif
 
+#ifdef ELEMENTWISESUB_OP
+template <typename Dtype>
+class ElementwiseSubParam : OpParam {
+  typedef typename DtypeTensorTrait<Dtype>::gtype GType;
+  typedef typename DtypeTensorTrait<Dtype>::rtype RType;
+
+ public:
+  ElementwiseSubParam(const VariableNameMap &inputs,
+                      const VariableNameMap &outputs, const AttributeMap &attrs,
+                      const Scope &scope) {
+    input_x_ = InputXFrom<GType>(inputs, scope);
+    input_y_ = InputYFrom<GType>(inputs, scope);
+    out_ = OutFrom<GType>(outputs, scope);
+    axis_ = GetAttr<int>("axis", attrs);
+  }
+
+  const GType *InputX() const { return input_x_; }
+
+  const GType *InputY() const { return input_y_; }
+
+  GType *Out() const { return out_; }
+
+  const int &Axis() const { return axis_; }
+
+ private:
+  GType *input_x_;
+  GType *input_y_;
+  GType *out_;
+  int axis_;
+};
+#endif
+
 #ifdef MUL_OP
 template <typename Dtype>
 class MulParam : OpParam {
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index ad3b2e5fe95abbec76380af3addca7b769ba3e34..1893491f1163c42784af1213b6581ae7817f86b2 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -173,6 +173,14 @@ if (NOT FOUND_MATCH)
     target_link_libraries(test-elementwiseadd-op paddle-mobile)
 
     # gen test
+    ADD_EXECUTABLE(test-elementwisesub-op operators/test_elementwise_sub_op.cpp test_helper.h test_include.h)
+    target_link_libraries(test-elementwisesub-op paddle-mobile)
+
+    # gen test
+    ADD_EXECUTABLE(test-im2sequence-op operators/test_im2sequence_op.cpp test_helper.h test_include.h)
+    target_link_libraries(test-im2sequence-op paddle-mobile)
+
+    # gen test
     ADD_EXECUTABLE(test-concat-op operators/test_concat_op.cpp test_helper.h test_include.h)
     target_link_libraries(test-concat-op paddle-mobile)
 
diff --git a/test/operators/test_elementwise_sub_op.cpp b/test/operators/test_elementwise_sub_op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cfac83eff7a012d52d47f96e088bd8519603cadc
--- /dev/null
+++ b/test/operators/test_elementwise_sub_op.cpp
@@ -0,0 +1,157 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "../test_helper.h"
+#include "../test_include.h"
+#include "operators/elementwise_sub_op.h"
+
+namespace paddle_mobile {
+namespace framework {
+
+template <typename Dtype>
+class TestElementwiseSubOp {
+ public:
+  explicit TestElementwiseSubOp(const Program<Dtype> p) : program_(p) {
+    if (use_optimize_) {
+      to_predict_program_ = program_.optimizeProgram;
+    } else {
+      to_predict_program_ = program_.originProgram;
+    }
+
+    const std::vector<std::shared_ptr<BlockDesc>> blocks =
+        to_predict_program_->Blocks();
+    //  DLOG << " **block size " << blocks.size();
+    for (int i = 0; i < blocks.size(); ++i) {
+      std::shared_ptr<BlockDesc> block_desc = blocks[i];
+      std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
+      //  DLOG << " ops " << ops.size();
+      for (int j = 0; j < ops.size(); ++j) {
+        std::shared_ptr<OpDesc> op = ops[j];
+        if (op->Type() == "elementwise_sub" &&
+            op->Input("X")[0] == "sigmoid_1.tmp_0") {
+          DLOG << " elementwise_sub attr size: " << op->GetAttrMap().size();
+          DLOG << " inputs size: " << op->GetInputs().size();
+          DLOG << " outputs size: " << op->GetOutputs().size();
+
+          std::shared_ptr<operators::ElementwiseSubOp<Dtype, float>> lrn =
+              std::make_shared<operators::ElementwiseSubOp<Dtype, float>>(
+                  op->Type(), op->GetInputs(), op->GetOutputs(),
+                  op->GetAttrMap(), program_.scope);
+          ops_of_block_[*block_desc.get()].push_back(lrn);
+        }
+      }
+    }
+  }
+
+  std::shared_ptr<Tensor> predict_bn(const Tensor &t1, const Tensor &t2) {
+    // feed
+    auto scope = program_.scope;
+    Variable *x1_feed_value = scope->Var("tmp_0");
+    auto tensor_x1 = x1_feed_value->GetMutable<LoDTensor>();
+    tensor_x1->ShareDataWith(t1);
+
+    Variable *x2_feed_value = scope->Var("sigmoid_1.tmp_0");
+    auto tensor_x2 = x2_feed_value->GetMutable<LoDTensor>();
+    tensor_x2->ShareDataWith(t2);
+
+    Variable *output = scope->Var("tmp_1");
+    auto *output_tensor = output->GetMutable<LoDTensor>();
+    output_tensor->mutable_data<float>({1, 1, 6, 6});
+    //  DLOG << typeid(output_tensor).name();
+    //  DLOG << "output_tensor dims: " << output_tensor->dims();
+
+    std::shared_ptr<Tensor> out_tensor = std::make_shared<LoDTensor>();
+    out_tensor.reset(output_tensor);
+
+    predict_bn(t1, t2, 0);
+    return out_tensor;
+  }
+
+ private:
+  const framework::Program<Dtype> program_;
+  std::shared_ptr<ProgramDesc> to_predict_program_;
+  std::map<framework::BlockDesc,
+           std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
+      ops_of_block_;
+  bool use_optimize_ = false;
+
+  void predict_bn(const Tensor &t1, const Tensor &t2, int block_id) {
+    std::shared_ptr<BlockDesc> to_predict_block =
+        to_predict_program_->Block(block_id);
+    for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
+      auto op = ops_of_block_[*to_predict_block.get()][j];
+      DLOG << "op -> run()";
+      op->Run();
+    }
+  }
+};
+
+template class TestElementwiseSubOp<CPU>;
+}  // namespace framework
+}  // namespace paddle_mobile
+
+int main() {
+  DLOG << "----------**********----------";
+  DLOG << "begin to run ElementwiseSub Test";
+  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  auto program = loader.Load(std::string(g_ocr) + "/model",
+                             std::string(g_ocr) + "/params");
+
+  /// input x1 (1,1,6,6)
+  paddle_mobile::framework::Tensor inputx1;
+  SetupTensor<float>(&inputx1, {1, 1, 6, 6}, static_cast<float>(0),
+                     static_cast<float>(1));
+  auto *inputx1_ptr = inputx1.data<float>();
+
+  /// input x2 (1,1,6,6)
+  paddle_mobile::framework::Tensor inputx2;
+  SetupTensor<float>(&inputx2, {1, 1, 6, 6}, static_cast<float>(0),
+                     static_cast<float>(1));
+  auto *inputx2_ptr = inputx2.data<float>();
+
+  paddle_mobile::framework::TestElementwiseSubOp<paddle_mobile::CPU>
+      testElementwiseSubOp(program);
+
+  auto output_op = testElementwiseSubOp.predict_bn(inputx1, inputx2);
+  auto *output_op_ptr = output_op->data<float>();
+
+  auto inputx1_dim = inputx1.numel() / inputx1.dims()[0];
+  DLOG << " input1 : ";
+  for (int i = 0; i < inputx1.dims()[0]; ++i) {
+    for (int j = 0; j < inputx1_dim; ++j) {
+      DLOGF("%f ", inputx1_ptr[i * inputx1_dim + j]);
+    }
+    DLOGF("\n");
+  }
+
+  auto inputx2_dim = inputx2.numel() / inputx2.dims()[0];
+  DLOG << " input2 : ";
+  for (int i = 0; i < inputx2.dims()[0]; ++i) {
+    for (int j = 0; j < inputx2_dim; ++j) {
+      DLOGF("%f ", inputx2_ptr[i * inputx2_dim + j]);
+    }
+    DLOGF("\n");
+  }
+
+  auto output_dim = output_op->numel() / output_op->dims()[0];
+  DLOG << " output : ";
+  for (int i = 0; i < output_op->dims()[0]; ++i) {
+    for (int j = 0; j < output_dim; ++j) {
+      DLOGF("%f ", output_op_ptr[i * output_dim + j]);
+    }
+    DLOGF("\n");
+  }
+
+  return 0;
+}
diff --git a/test/operators/test_im2sequence_op.cpp b/test/operators/test_im2sequence_op.cpp
index a7512d3bf3cffcb100fe292e50fc7b7b23fa0aa0..b45e437e12f95cd9f7050247fc03a152246d8122 100644
--- a/test/operators/test_im2sequence_op.cpp
+++ b/test/operators/test_im2sequence_op.cpp
@@ -12,51 +12,127 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "../executor_for_test.h"
+#include "../test_helper.h"
 #include "../test_include.h"
 #include "operators/im2sequence_op.h"
 
-int main() {
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
-  auto program = loader.Load(g_ocr_recg);
-  PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
-                        "program file read fail");
+namespace paddle_mobile {
+namespace framework {
 
-  Executor4Test<paddle_mobile::CPU,
-                paddle_mobile::operators::Im2SequenceOp<paddle_mobile::CPU, float>>
-      executor(program, "im2sequence");
+template <typename Dtype>
+class TestIm2SequenceOp {
+ public:
+  explicit TestIm2SequenceOp(const Program<Dtype> p) : program_(p) {
+    if (use_optimize_) {
+      to_predict_program_ = program_.optimizeProgram;
+    } else {
+      to_predict_program_ = program_.originProgram;
+    }
 
-  // 1. input_tensors;
-  vector<Tensor> input_tensors;
+    const std::vector<std::shared_ptr<BlockDesc>> blocks =
+        to_predict_program_->Blocks();
+    //  DLOG << " **block size " << blocks.size();
+    for (int i = 0; i < blocks.size(); ++i) {
+      std::shared_ptr<BlockDesc> block_desc = blocks[i];
+      std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
+      //  DLOG << " ops " << ops.size();
+      for (int j = 0; j < ops.size(); ++j) {
+        std::shared_ptr<OpDesc> op = ops[j];
+        if (op->Type() == "im2sequence" &&
+            op->Input("X")[0] == "conv2d_19.tmp_1") {
+          DLOG << " im2squence attr size: " << op->GetAttrMap().size();
+          DLOG << " inputs size: " << op->GetInputs().size();
+          DLOG << " outputs size: " << op->GetOutputs().size();
 
-  Tensor input1;
-  auto input1_data = CreateInput<float>(&input1, {2, 2, 3, 3}, -1, 1);
-  input_tensors.push_back(input1);
+          std::shared_ptr<operators::Im2SequenceOp<Dtype, float>> lrn =
+              std::make_shared<operators::Im2SequenceOp<Dtype, float>>(
+                  op->Type(), op->GetInputs(), op->GetOutputs(),
+                  op->GetAttrMap(), program_.scope);
+          ops_of_block_[*block_desc.get()].push_back(lrn);
+        }
+      }
+    }
+  }
 
-  // 2. input_names
-  vector<string> input_names({
-      "conv2d_19.tmp_1",
-  });
+  std::shared_ptr<Tensor> predict_bn(const Tensor &t1) {
+    // feed
+    auto scope = program_.scope;
+    Variable *x1_feed_value = scope->Var("conv2d_19.tmp_1");
+    auto tensor_x1 = x1_feed_value->GetMutable<LoDTensor>();
+    tensor_x1->ShareDataWith(t1);
 
-  // 3. output_names
-  vector<string> output_names({"im2sequence_0.tmp_0"});
+    Variable *output = scope->Var("im2sequence_0.tmp_0");
+    auto *output_tensor = output->GetMutable<LoDTensor>();
+    output_tensor->mutable_data<float>({2, 12});
+    //  DLOG << typeid(output_tensor).name();
+    //  DLOG << "output_tensor dims: " << output_tensor->dims();
 
-  // 4. out_dims;
-  vector<DDim> out_ddims;
-  auto out_ddim = paddle_mobile::framework::make_ddim({8, 9});
-  out_ddims.push_back(out_ddim);
+    std::shared_ptr<Tensor> out_tensor = std::make_shared<LoDTensor>();
+    out_tensor.reset(output_tensor);
 
-  auto output = executor.Predict<LoDTensor>(input_tensors, input_names,
-                                            output_names, out_ddims);
+    predict_bn(t1, 0);
+    return out_tensor;
+  }
 
-  auto output0_data = output[0]->data<float>();
+ private:
+  const framework::Program<Dtype> program_;
+  std::shared_ptr<ProgramDesc> to_predict_program_;
+  std::map<framework::BlockDesc,
+           std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
+      ops_of_block_;
+  bool use_optimize_ = false;
 
-  for (int j = 0; j < input_tensors[0].numel(); ++j) {
-    DLOG << " value of input: " << input1_data[j];
+  void predict_bn(const Tensor &t1, int block_id) {
+    std::shared_ptr<BlockDesc> to_predict_block =
+        to_predict_program_->Block(block_id);
+    for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
+      auto op = ops_of_block_[*to_predict_block.get()][j];
+      DLOG << "op -> run()";
+      op->Run();
+    }
   }
+};
+
+template class TestIm2SequenceOp<CPU>;
+}  // namespace framework
+}  // namespace paddle_mobile
 
-  for (int j = 0; j < output[0]->numel(); ++j) {
-    DLOG << " value of output: " << output0_data[j];
+int main() {
+  DLOG << "----------**********----------";
+  DLOG << "begin to run Im2Sequence Test";
+  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  auto program = loader.Load(std::string(g_eng) + "/model",
+                             std::string(g_eng) + "/params");
+
+  /// input x (4,10,2,2)
+  paddle_mobile::framework::Tensor inputx;
+  SetupTensor<float>(&inputx, {1, 2, 6, 2}, static_cast<float>(0),
+                     static_cast<float>(1));
+  auto *inputx_ptr = inputx.data<float>();
+
+  paddle_mobile::framework::TestIm2SequenceOp<paddle_mobile::CPU>
+      testIm2SequenceOp(program);
+
+  auto output_op = testIm2SequenceOp.predict_bn(inputx);
+  auto *output_op_ptr = output_op->data<float>();
+
+  auto input_dim = inputx.numel() / inputx.dims()[0];
+  DLOG << " input : ";
+  for (int i = 0; i < inputx.dims()[0]; ++i) {
+    for (int j = 0; j < input_dim; ++j) {
+      DLOGF("%f ", inputx_ptr[i * input_dim + j]);
+    }
+    DLOGF("\n");
   }
+
+  auto output_dim = output_op->numel() / output_op->dims()[0];
+  DLOG << " output : ";
+  for (int i = 0; i < output_op->dims()[0]; ++i) {
+    for (int j = 0; j < output_dim; ++j) {
+      DLOGF("%f ", output_op_ptr[i * output_dim + j]);
+    }
+    DLOGF("\n");
+  }
+
   return 0;
 }
diff --git a/tools/op.cmake b/tools/op.cmake
index 5d5567a524ae69bfb4668ff6078621eb4cb5920d..d0953f2bb655a6fd6be47c830d63b518b76780c3 100644
--- a/tools/op.cmake
+++ b/tools/op.cmake
@@ -188,6 +188,8 @@ if(NOT FOUND_MATCH)
     set(CONV_OP ON)
     set(DEPTHWISECONV_OP ON)
    set(ELEMENTWISEADD_OP ON)
+    set(ELEMENTWISESUB_OP ON)
+    set(IM2SEQUENCE_OP ON)
     set(FUSION_CONVADD_OP ON)
     set(FUSION_CONVADDPRELU_OP ON)
     set(FUSION_CONVADDRELU_OP ON)
@@ -263,6 +265,9 @@ endif()
 if (ELEMENTWISEADD_OP)
     add_definitions(-DELEMENTWISEADD_OP)
 endif()
+if (ELEMENTWISESUB_OP)
+    add_definitions(-DELEMENTWISESUB_OP)
+endif()
 if (FUSION_CONVADD_OP)
     add_definitions(-DFUSION_CONVADD_OP)
 endif()