From 70c05971a98f510d824cf4877d68ec03a49ab1f7 Mon Sep 17 00:00:00 2001 From: wangliu Date: Sat, 19 May 2018 18:39:51 +0800 Subject: [PATCH] fix #210 add pool& pool test fix #211 add executor for testing op --- src/framework/executor.cpp | 43 +++++----- src/framework/executor.h | 7 +- src/framework/operator.h | 5 +- src/io.cpp | 3 + src/operators/conv_op.h | 4 +- src/operators/kernel/arm/conv_kernel.cpp | 5 +- src/operators/kernel/arm/pool_kernel.cpp | 77 +++++++++++++++++ src/operators/kernel/conv_kernel.h | 2 +- src/operators/kernel/pool_kernel.h | 35 ++++++++ src/operators/math/pool3x3.h | 30 +++++++ src/operators/math/pool_2x2.h | 30 +++++++ src/operators/math/pooling.cpp | 100 +++++++++++++++++++++++ src/operators/math/pooling.h | 69 ++++++++++++++++ src/operators/op_param.h | 45 +++++++++- src/operators/pool_op.cpp | 60 ++++++++++++++ src/operators/pool_op.h | 52 ++++++++++++ test/CMakeLists.txt | 8 +- test/framework/executor_for_test.cpp | 76 +++++++++++++++++ test/framework/executor_for_test.h | 35 ++++++++ test/operators/test_cov_op.cpp | 46 ++++------- test/operators/test_pool_op.cpp | 46 +++++++++++ test/test_helper.h | 1 + 22 files changed, 714 insertions(+), 65 deletions(-) create mode 100644 src/operators/kernel/arm/pool_kernel.cpp create mode 100644 src/operators/kernel/pool_kernel.h create mode 100644 src/operators/math/pool3x3.h create mode 100644 src/operators/math/pool_2x2.h create mode 100644 src/operators/math/pooling.cpp create mode 100644 src/operators/math/pooling.h create mode 100644 src/operators/pool_op.cpp create mode 100644 src/operators/pool_op.h create mode 100644 test/framework/executor_for_test.cpp create mode 100644 test/framework/executor_for_test.h create mode 100644 test/operators/test_pool_op.cpp diff --git a/src/framework/executor.cpp b/src/framework/executor.cpp index b0e6d64be2..2b60527846 100644 --- a/src/framework/executor.cpp +++ b/src/framework/executor.cpp @@ -32,26 +32,28 @@ Executor::Executor(const Program p) : program_(p) { to_predict_program_ = program_.originProgram; } - const std::vector> blocks = - to_predict_program_->Blocks(); - for (int i = 0; i < blocks.size(); ++i) { - std::shared_ptr block_desc = blocks[i]; - std::vector> ops = block_desc->Ops(); - for (int j = 0; j < ops.size(); ++j) { - std::shared_ptr op = ops[j]; - if (op->Type() == "conv2d" && op->Input("Input")[0] == "pixel") { - Attribute strides_attr = op->GetAttrMap().at("strides"); - std::vector stride = strides_attr.Get>(); - for (int k = 0; k < stride.size(); ++k) { - } - std::shared_ptr> conv = - std::make_shared>( - op->Type(), op->GetInputs(), op->GetOutputs(), - op->GetAttrMap(), program_.scope); - ops_of_block_[*block_desc.get()].push_back(conv); - } - } - } + // const std::vector> blocks = + to_predict_program_->Blocks(); + // for (int i = 0; i < blocks.size(); ++i) { + // std::shared_ptr block_desc = blocks[i]; + // std::vector> ops = block_desc->Ops(); + // for (int j = 0; j < ops.size(); ++j) { + // std::shared_ptr op = ops[j]; + // if (op->Type() == "conv2d" && op->Input("Input")[0] == + // "pixel") { + // Attribute strides_attr = op->GetAttrMap().at("strides"); + // std::vector stride = + // strides_attr.Get>(); for (int k = 0; k < + // stride.size(); ++k) { + // } + // std::shared_ptr> conv = + // std::make_shared>( + // op->Type(), op->GetInputs(), op->GetOutputs(), + // op->GetAttrMap(), program_.scope); + // ops_of_block_[*block_desc.get()].push_back(conv); + // } + // } + // } } template @@ -82,7 +84,6 @@ void Executor::predict(const Tensor &t, int 
block_id) { to_predict_program_->Block(block_id); for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) { auto op = ops_of_block_[*to_predict_block.get()][j]; - // std::cout << "开始run" << std::endl; op->Run(); } } diff --git a/src/framework/executor.h b/src/framework/executor.h index 996565a89b..7500d67050 100644 --- a/src/framework/executor.h +++ b/src/framework/executor.h @@ -36,13 +36,18 @@ namespace framework { template class Executor { public: + Executor(); + Executor(const Program p); + std::shared_ptr predict(Tensor &t); - private: + public: const framework::Program program_; std::shared_ptr to_predict_program_; + void predict(const Tensor &t, int block_id); + std::map>>> ops_of_block_; diff --git a/src/framework/operator.h b/src/framework/operator.h index 75db3518a4..734b746d2a 100644 --- a/src/framework/operator.h +++ b/src/framework/operator.h @@ -60,10 +60,9 @@ template class OperatorBase : PaddleMobileObject { const VariableNameMap &Outputs() const { return outputs_; } const std::string &Type() const { return type_; } const AttributeMap &Attrs() const { return attrs_; } - void ClearVariables() const { + void ClearVariables(const std::vector &var_names) const { if (this->scope_) { - this->scope_->EraseVars(this->inputs_.at("Filter")); - this->scope_->EraseVars(this->inputs_.at("Input")); + this->scope_->EraseVars(var_names); } } diff --git a/src/io.cpp b/src/io.cpp index 560ca99e59..03f184859d 100644 --- a/src/io.cpp +++ b/src/io.cpp @@ -194,6 +194,9 @@ Loader::Load(const std::string &dirname) { framework::proto::BlockDesc block = program_desc_proto.blocks()[i]; LOG(kLOG_DEBUG) << "block: " << block.idx(); for (int j = 0; j < block.ops().size(); ++j) { + if (j == 2) { + break; + } framework::proto::OpDesc op = block.ops()[j]; LOG(kLOG_DEBUG1) << "op: " << op.type(); for (int m = 0; m < op.inputs_size(); ++m) { diff --git a/src/operators/conv_op.h b/src/operators/conv_op.h index dca34c483f..b23e641972 100644 --- a/src/operators/conv_op.h +++ b/src/operators/conv_op.h @@ -40,9 +40,9 @@ class ConvOp : public framework::OperatorWithKernel { void InferShape() const override; void Run() const { - operators::ConvKernel kernel; + operators::ConvKernel kernel; kernel.Compute(param_); - this->ClearVariables(); + this->ClearVariables({"Filter", "Input"}); } private: diff --git a/src/operators/kernel/arm/conv_kernel.cpp b/src/operators/kernel/arm/conv_kernel.cpp index 1de4ac495a..96f0bcd8b1 100644 --- a/src/operators/kernel/arm/conv_kernel.cpp +++ b/src/operators/kernel/arm/conv_kernel.cpp @@ -34,8 +34,7 @@ bool IsExpand(const std::vector &filter_dim, return !(filter_1 && strides_1 && padding_0 && dilation_1); } -template <> -void ConvKernel::Compute(const ConvParam ¶m) const { +template <> void ConvKernel::Compute(const ConvParam ¶m) const { LOG(kLOG_DEBUG) << param; const Tensor *input = param.Input(); @@ -149,7 +148,7 @@ void ConvKernel::Compute(const ConvParam ¶m) const { } } -template class ConvKernel; +template class ConvKernel; } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/kernel/arm/pool_kernel.cpp b/src/operators/kernel/arm/pool_kernel.cpp new file mode 100644 index 0000000000..34e17b671a --- /dev/null +++ b/src/operators/kernel/arm/pool_kernel.cpp @@ -0,0 +1,77 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +==============================================================================*/ +#include "common/log.h" +#include + +namespace paddle_mobile { +namespace operators { + +inline void PoolBasic(std::string pooling_type, std::vector ksize, + std::vector strides, std::vector paddings, + const Tensor *in_x, Tensor *out) { + if (pooling_type == "max") { + math::PoolFunctor, float> pool2d_forward; + math::MaxPool pool_process; + pool2d_forward(*in_x, ksize, strides, paddings, pool_process, out); + + } else if (pooling_type == "avg") { + math::PoolFunctor, float> pool2d_forward; + math::AvgPool pool_process; + pool2d_forward(*in_x, ksize, strides, paddings, pool_process, out); + } +} + +template <> void PoolKernel::Compute(const PoolParam ¶m) const { + const Tensor *in_x = param.Input(); + Tensor *out = param.Output(); + std::string pooling_type = param.PoolingType(); + + std::vector ksize = param.Ksize(); + + std::vector strides = param.Strides(); + + std::vector paddings = param.Paddings(); + if (ksize.size() != 2) { + LOG(paddle_mobile::LogLevel::kLOG_ERROR) + << "Pool op only supports 2D and 3D input."; + } + + if (param.isGlobalPooling()) { + for (size_t i = 0; i < ksize.size(); ++i) { + paddings[i] = 0; + ksize[i] = static_cast(in_x->dims()[i + 2]); + } + } + + PoolBasic(pooling_type, ksize, strides, paddings, in_x, out); + + // if (param.isGlobalPooling() || ksize[0] != ksize[1] || + // strides[0] != strides[1] || strides[1] != 2 || + // paddings[0] != paddings[1] || paddings[1] > 1) { + // PoolBasic(pooling_type, ksize, strides, paddings, in_x, out); + // + // } else if (ksize[0] == 2) { + // + // } else if (ksize[0] == 3) { + // + // } else { + // PoolBasic(pooling_type, ksize, strides, paddings, in_x, out); + // } +} +} // namespace operators +} // namespace paddle_mobile diff --git a/src/operators/kernel/conv_kernel.h b/src/operators/kernel/conv_kernel.h index 7431898c8d..5f184ed63b 100644 --- a/src/operators/kernel/conv_kernel.h +++ b/src/operators/kernel/conv_kernel.h @@ -29,7 +29,7 @@ namespace operators { using namespace framework; -template +template class ConvKernel : public framework::OpKernelBase { public: void Compute(const ConvParam ¶m) const; diff --git a/src/operators/kernel/pool_kernel.h b/src/operators/kernel/pool_kernel.h new file mode 100644 index 0000000000..6b3decd76f --- /dev/null +++ b/src/operators/kernel/pool_kernel.h @@ -0,0 +1,35 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +==============================================================================*/ +#pragma once + +#include "framework/operator.h" +#include "operators/math/pooling.h" +#include "operators/op_param.h" + +namespace paddle_mobile { +namespace operators { + +using namespace framework; + +template +class PoolKernel : public framework::OpKernelBase { + public: + void Compute(const PoolParam ¶m) const; +}; +} // namespace operators +} // namespace paddle_mobile diff --git a/src/operators/math/pool3x3.h b/src/operators/math/pool3x3.h new file mode 100644 index 0000000000..8ee50a980e --- /dev/null +++ b/src/operators/math/pool3x3.h @@ -0,0 +1,30 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +==============================================================================*/ +#pragma once + +#if __ARM_NEON +#include +#endif // __ARM_NEON + +static void Pool3x3Max() { + // todo impl with neon +} + +static void Pool3x3Avg() { + // todo impl with neon +} diff --git a/src/operators/math/pool_2x2.h b/src/operators/math/pool_2x2.h new file mode 100644 index 0000000000..c9a9486ae8 --- /dev/null +++ b/src/operators/math/pool_2x2.h @@ -0,0 +1,30 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +==============================================================================*/ +#pragma once + +#if __ARM_NEON +#include +#endif // __ARM_NEON + +static void Pool2x2Max() { + // todo impl with neon +} + +static void Pool2x2Avg() { + // todo impl with neon +} diff --git a/src/operators/math/pooling.cpp b/src/operators/math/pooling.cpp new file mode 100644 index 0000000000..5cfbe73dae --- /dev/null +++ b/src/operators/math/pooling.cpp @@ -0,0 +1,100 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +==============================================================================*/ + +#include "pooling.h" +#include + +namespace paddle_mobile { +namespace operators { +namespace math { + +/* + * All tensors are in NCHW format. + * Ksize, strides, paddings are two elements. These two elements represent + * height and width, respectively. 
+ */ +template +class PoolFunctor { + public: + void operator()(const framework::Tensor &input, + const std::vector &ksize, + const std::vector &strides, + const std::vector &paddings, PoolProcess pool_process, + framework::Tensor *output) { + + const int batch_size = input.dims()[0]; + + const int input_height = input.dims()[2]; + + const int input_width = input.dims()[3]; + if (output == nullptr) { + DLOG << "output tensor is null"; + } + const int output_channels = output->dims()[1]; + + const int output_height = output->dims()[2]; + const int output_width = output->dims()[3]; + const int ksize_height = ksize[0]; + const int ksize_width = ksize[1]; + const int stride_height = strides[0]; + const int stride_width = strides[1]; + const int padding_height = paddings[0]; + const int padding_width = paddings[1]; + + const int input_stride = input_height * input_width; + const int output_stride = output_height * output_width; + + const T *input_data = input.data(); + T *output_data = output->mutable_data(); + + for (int i = 0; i < batch_size; i++) { +#pragma omp parallel for + for (int c = 0; c < output_channels; ++c) { + for (int ph = 0; ph < output_height; ++ph) { + int hstart = ph * stride_height - padding_height; + int hend = std::min(hstart + ksize_height, input_height); + hstart = std::max(hstart, 0); + for (int pw = 0; pw < output_width; ++pw) { + int wstart = pw * stride_width - padding_width; + int wend = std::min(wstart + ksize_width, input_width); + wstart = std::max(wstart, 0); + + T ele = pool_process.initial(); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + pool_process.compute( + input_data[h * input_width + w], &ele); + } + } + int pool_size = (hend - hstart) * (wend - wstart); + pool_process.finalize(static_cast(pool_size), &ele); + output_data[ph * output_width + pw] = ele; + } + } + input_data += input_stride; + output_data += output_stride; + } + } + } +}; + +template class PoolFunctor, float>; +template class PoolFunctor, float>; +} // namespace math +} // namespace operators +} // namespace paddle_mobile diff --git a/src/operators/math/pooling.h b/src/operators/math/pooling.h new file mode 100644 index 0000000000..a8ac2b2845 --- /dev/null +++ b/src/operators/math/pooling.h @@ -0,0 +1,69 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+==============================================================================*/ + +#pragma once + +#include "common/log.h" +#include "framework/tensor.h" + +namespace paddle_mobile { +namespace operators { +namespace math { + +#define FLT_MAX __FLT_MAX__ + +/* + * \brief Extracting simple operations from pooling. + * Both MaxPool and AvgPool need "initial", "compute" and "finalize" + * operation. + * MaxPool initializes temp variable to the negative maximum to find the + * maximum value in the pooling field. + * AvgPool initializes temp variable to the zero to accumulate all values + * in pool pooling, and finally takes the average. + * MaxPoolGrad and AvgPoolGrad are gradient operations respectively. + */ +template class MaxPool { + public: + inline T initial() { return static_cast(-FLT_MAX); } + + inline void compute(const T &x, T *y) { *y = *y > x ? *y : x; } + + inline void finalize(const T &pool_field, T *y) {} +}; + +template class AvgPool { + public: + inline T initial() { return static_cast(0); } + + inline void compute(const T &x, T *y) { *y += x; } + + inline void finalize(const T &pool_field, T *y) { *y /= pool_field; } +}; + +template +class PoolFunctor { + public: + void operator()(const framework::Tensor &input, + const std::vector &ksize, + const std::vector &strides, + const std::vector &paddings, PoolProcess pool_compute, + framework::Tensor *output); +}; +} +} // namespace operators +} // namespace paddle_mobile diff --git a/src/operators/op_param.h b/src/operators/op_param.h index 4e65d1daa3..7241d90486 100644 --- a/src/operators/op_param.h +++ b/src/operators/op_param.h @@ -16,7 +16,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ==============================================================================*/ -#pragma once; +#pragma once #include "common/log.h" #include "common/type_define.h" @@ -341,5 +341,48 @@ class BatchNormParam : OpParam { bool is_test_; std::string data_format_; }; +class PoolParam : public OpParam { + public: + PoolParam(const VariableNameMap &inputs, const VariableNameMap &outputs, + const framework::AttributeMap &attrs, + const framework::Scope &scope) { + input_ = InputXFrom(inputs, scope); + + output_ = OutFrom(outputs, scope); + pooling_type_ = GetAttr("pooling_type", attrs); + ksize_ = GetAttr>("ksize", attrs); + strides_ = GetAttr>("strides", attrs); + paddings_ = GetAttr>("paddings", attrs); + ceil_mode_ = GetAttr("ceil_mode", attrs); + gloabal_pooling_ = GetAttr("global_pooling", attrs); + } + + const Tensor *Input() const { return input_; } + + Tensor *Output() const { return output_; } + + const std::string &PoolingType() const { return pooling_type_; } + + const std::vector &Ksize() const { return ksize_; } + + const std::vector &Strides() const { return strides_; } + + const std::vector &Paddings() const { return paddings_; } + + bool isCeilMode() const { return ceil_mode_; } + + bool isGlobalPooling() const { return gloabal_pooling_; } + + private: + Tensor *input_; + Tensor *output_; + std::string pooling_type_; + std::vector ksize_; + std::vector strides_; + std::vector paddings_; + bool ceil_mode_; + bool gloabal_pooling_ = false; +}; + } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/pool_op.cpp b/src/operators/pool_op.cpp new file mode 100644 index 0000000000..692e421506 --- /dev/null +++ b/src/operators/pool_op.cpp @@ -0,0 +1,60 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +==============================================================================*/ + +#include "pool_op.h" + +namespace paddle_mobile { +namespace operators { + +int PoolOutputSize(int input_size, int filter_size, int padding, int stride, + bool ceil_mode) { + int output_size; + if (!ceil_mode) { + output_size = (input_size - filter_size + 2 * padding) / stride + 1; + } else { + output_size = + (input_size - filter_size + 2 * padding + stride - 1) / stride + 1; + } + return output_size; +} +template +void PoolOp::InferShape() const { + auto in_x_dims = param_.Input()->dims(); + std::vector ksize = param_.Ksize(); + std::vector paddings = param_.Paddings(); + std::vector strides = param_.Strides(); + bool ceil_mode = param_.isCeilMode(); + + if (param_.isGlobalPooling()) { + ksize.resize(static_cast(in_x_dims.size()) - 2); + for (size_t i = 0; i < ksize.size(); ++i) { + paddings[i] = 0; + ksize[i] = static_cast(in_x_dims[i + 2]); + } + } + std::vector output_shape({in_x_dims[0], in_x_dims[1]}); + for (size_t i = 0; i < ksize.size(); ++i) { + output_shape.push_back(PoolOutputSize( + in_x_dims[i + 2], ksize[i], paddings[i], strides[i], ceil_mode)); + } + param_.Output()->Resize(framework::make_ddim(output_shape)); + DLOG << "infer shape out size =" << param_.Output()->numel(); +} +template class PoolOp; +} // namespace operators +} // namespace paddle_mobile diff --git a/src/operators/pool_op.h b/src/operators/pool_op.h new file mode 100644 index 0000000000..ea5d9953e4 --- /dev/null +++ b/src/operators/pool_op.h @@ -0,0 +1,52 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +==============================================================================*/ + +#pragma once + +#include +#include +#include + +namespace paddle_mobile { +namespace operators { +using namespace framework; + +template +class PoolOp : public framework::OperatorWithKernel { + public: + PoolOp(const std::string &type, const VariableNameMap &inputs, + const VariableNameMap &outputs, const framework::AttributeMap &attrs, + std::shared_ptr scope) + : framework::OperatorWithKernel(type, inputs, outputs, + attrs, scope), + param_(inputs, outputs, attrs, *scope) {} + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape() const override; + + void Run() const { + // InferShape(); + operators::PoolKernel kernel; + kernel.Compute(param_); + this->ClearVariables({"X"}); + } + + private: + PoolParam param_; +}; +} // namespace operators +} // namespace paddle_mobile diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index ba21ca1e12..1db3187bd6 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -1,6 +1,6 @@ # gen test -ADD_EXECUTABLE(test-conv-op operators/test_cov_op.cpp test_helper.h test_include.h) +ADD_EXECUTABLE(test-conv-op operators/test_cov_op.cpp test_helper.h test_include.h framework/executor_for_test.h framework/executor_for_test.cpp) target_link_libraries(test-conv-op paddle-mobile) # gen test @@ -32,5 +32,11 @@ ADD_EXECUTABLE(test-load framework/test_load.cpp) target_link_libraries(test-load paddle-mobile) # gen test log +# gen test ADD_EXECUTABLE(test-optimize framework/test_optimize.cpp) target_link_libraries(test-optimize paddle-mobile) + + +#gen test +ADD_EXECUTABLE(test-pool operators/test_pool_op.cpp test_helper.h test_include.h framework/executor_for_test.h framework/executor_for_test.cpp) +target_link_libraries(test-pool paddle-mobile) \ No newline at end of file diff --git a/test/framework/executor_for_test.cpp b/test/framework/executor_for_test.cpp new file mode 100644 index 0000000000..6348c491c9 --- /dev/null +++ b/test/framework/executor_for_test.cpp @@ -0,0 +1,76 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+==============================================================================*/ + +#include "executor_for_test.h" + +template +Executor4Test::Executor4Test(const Program p, + std::string op_type) + : Executor(p) { + + if (this->program_.originProgram == nullptr) { + LOG(paddle_mobile::LogLevel::kLOG_ERROR) + << "to_predict_program_ == nullptr"; + } + + const std::vector> blocks = + this->to_predict_program_->Blocks(); + + for (int i = 0; i < blocks.size(); ++i) { + std::shared_ptr block_desc = blocks[i]; + std::vector> ops = block_desc->Ops(); + for (int j = 0; j < ops.size(); ++j) { + std::shared_ptr op = ops[j]; + if (op->Type() == op_type) { + std::shared_ptr op_ptr = std::make_shared( + op->Type(), op->GetInputs(), op->GetOutputs(), + op->GetAttrMap(), this->program_.scope); + + this->ops_of_block_[*block_desc.get()].push_back(op_ptr); + break; + } + } + } +} + +template +std::shared_ptr +Executor4Test::predict(Tensor &t, std::string input, + std::string output, DDim dDim) { + + auto scope = this->program_.scope; + Variable *g_feed_value = scope->Var(input); + auto tensor = g_feed_value->GetMutable(); + tensor->ShareDataWith(t); + + Variable *con_output = scope->Var(output); + Tensor *output_tensor = con_output->GetMutable(); + output_tensor->mutable_data(dDim); + std::shared_ptr out_tensor = std::make_shared(); + out_tensor.reset(output_tensor); + + Executor::predict(t, 0); + return out_tensor; +} + +template class Executor4Test< + paddle_mobile::CPU, + paddle_mobile::operators::ConvOp>; +template class Executor4Test< + paddle_mobile::CPU, + paddle_mobile::operators::PoolOp>; diff --git a/test/framework/executor_for_test.h b/test/framework/executor_for_test.h new file mode 100644 index 0000000000..381fd03f15 --- /dev/null +++ b/test/framework/executor_for_test.h @@ -0,0 +1,35 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+==============================================================================*/ + +#pragma once + +#include "common/log.h" +#include "framework/executor.h" +#include "operators/conv_op.h" +#include "operators/pool_op.h" + +using namespace paddle_mobile::framework; + +template +class Executor4Test : public Executor { + public: + Executor4Test(const Program p, std::string op_type); + + std::shared_ptr predict(Tensor &t, std::string input, + std::string output, DDim dDim); +}; diff --git a/test/operators/test_cov_op.cpp b/test/operators/test_cov_op.cpp index 1902e44f63..00af04d47d 100644 --- a/test/operators/test_cov_op.cpp +++ b/test/operators/test_cov_op.cpp @@ -16,49 +16,31 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ==============================================================================*/ +#include "../framework/executor_for_test.h" #include "../test_helper.h" -#include "framework/executor.h" #include "io.h" int main() { - - std::string data_set = "cifar10"; - // - // if (data_set == "cifar10") { - // SetupTensor(&input, {FLAGS_batch_size, 3, 32, 32}, - // static_cast(0), static_cast(1)); - // } else if (data_set == "imagenet") { - // SetupTensor(&input, {FLAGS_batch_size, 3, 224, 224}, - // static_cast(0), static_cast(1)); - // } else { - // LOG(FATAL) << "Only cifar10 or imagenet is supported."; - // } - paddle_mobile::Loader loader; - auto program = loader.Load(std::string("../../../test/models/googlenet")); + auto program = loader.Load(std::string("../models/googlenet")); + if (program.originProgram == nullptr) { + DLOG << "program file read fail"; + } - paddle_mobile::framework::Executor executor(program); + Executor4Test> + executor(program, "conv2d"); paddle_mobile::framework::Tensor input; SetupTensor(&input, {1, 3, 32, 32}, static_cast(0), static_cast(1)); - float *input_ptr = input.data(); - for (int i = 0; i < input.numel(); ++i) { - // std::cout << input_ptr[i] << std::endl; - } - // std::cout << "input: " << input.memory_size() << std::endl; - // std::cout << "input: " << input.numel() << std::endl; - - auto output = executor.predict(input); - - // std::cout << "output: " << output->memory_size() << std::endl; - // std::cout << "output: " << output->numel() << std::endl; - - // float* output_ptr = output->data(); - // for (int j = 0; j < output->numel(); ++j) { - // std::cout << " value of output: " << output_ptr[j] << std::endl; - // + auto output = + executor.predict(input, "data", "conv2d_0.tmp_0", {1, 64, 56, 56}); + float *output_ptr = output->data(); + for (int j = 0; j < output->numel(); ++j) { + DLOG << " value of output: " << output_ptr[j]; + } return 0; } diff --git a/test/operators/test_pool_op.cpp b/test/operators/test_pool_op.cpp new file mode 100644 index 0000000000..44be2fb5bc --- /dev/null +++ b/test/operators/test_pool_op.cpp @@ -0,0 +1,46 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +==============================================================================*/ + +#include "../framework/executor_for_test.h" +#include "../test_helper.h" +#include "io.h" + +int main() { + paddle_mobile::Loader loader; + auto program = loader.Load(std::string("../models/googlenet")); + if (program.originProgram == nullptr) { + DLOG << "program read file"; + } + + Executor4Test> + executor(program, "pool2d"); + + paddle_mobile::framework::Tensor input; + SetupTensor(&input, {1, 64, 112, 112}, static_cast(0), + static_cast(1)); + + auto output = executor.predict(input, "conv2d_0.tmp_1", "pool2d_0.tmp_0", + {1, 64, 56, 56}); + + float *output_ptr = output->data(); + for (int j = 0; j < output->numel(); ++j) { + DLOG << " value of output: " << output_ptr[j]; + } + return 0; +} diff --git a/test/test_helper.h b/test/test_helper.h index c189aa123a..ffb9131be7 100644 --- a/test/test_helper.h +++ b/test/test_helper.h @@ -16,6 +16,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ==============================================================================*/ #pragma once +#include "common/log.h" #include "framework/ddim.h" #include "framework/tensor.h" #include -- GitLab
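
The pooling arithmetic introduced by this patch — the output-size formula in PoolOutputSize() (pool_op.cpp) and the initial/compute/finalize decomposition used by MaxPool, AvgPool and PoolFunctor (pooling.h / pooling.cpp) — can be seen end to end in the standalone C++ sketch below. The sketch is not part of the patch and does not use the paddle-mobile framework types; names such as SimpleMaxPool, SimpleAvgPool and NaivePool2D are illustrative only, and only the math mirrors the patch.

// Standalone sketch: output-size formula plus a naive single-channel 2D pool
// structured like the inner loops of PoolFunctor. All names here are local to
// this sketch, not taken from the patch.
#include <algorithm>
#include <cfloat>
#include <cstdio>
#include <vector>

// Same formula as PoolOutputSize() in pool_op.cpp.
int PoolOutputSize(int input_size, int filter_size, int padding, int stride,
                   bool ceil_mode) {
  if (!ceil_mode) {
    return (input_size - filter_size + 2 * padding) / stride + 1;
  }
  return (input_size - filter_size + 2 * padding + stride - 1) / stride + 1;
}

// Minimal stand-ins for the "pool process" classes: an initial value, an
// accumulation step per element, and a finalize step per window.
struct SimpleMaxPool {
  float initial() const { return -FLT_MAX; }
  void compute(float x, float *y) const { *y = std::max(*y, x); }
  void finalize(float /*pool_size*/, float * /*y*/) const {}
};

struct SimpleAvgPool {
  float initial() const { return 0.f; }
  void compute(float x, float *y) const { *y += x; }
  void finalize(float pool_size, float *y) const { *y /= pool_size; }
};

// Naive pooling over one H x W slice (i.e. one (n, c) plane of an NCHW
// tensor), mirroring the hstart/hend/wstart/wend window logic of PoolFunctor.
template <typename PoolProcess>
std::vector<float> NaivePool2D(const std::vector<float> &in, int H, int W,
                               int k, int stride, int pad,
                               PoolProcess process) {
  const int OH = PoolOutputSize(H, k, pad, stride, /*ceil_mode=*/false);
  const int OW = PoolOutputSize(W, k, pad, stride, /*ceil_mode=*/false);
  std::vector<float> out(OH * OW);
  for (int ph = 0; ph < OH; ++ph) {
    const int hstart = std::max(ph * stride - pad, 0);
    const int hend = std::min(ph * stride - pad + k, H);
    for (int pw = 0; pw < OW; ++pw) {
      const int wstart = std::max(pw * stride - pad, 0);
      const int wend = std::min(pw * stride - pad + k, W);
      float ele = process.initial();
      for (int h = hstart; h < hend; ++h) {
        for (int w = wstart; w < wend; ++w) {
          process.compute(in[h * W + w], &ele);
        }
      }
      process.finalize(static_cast<float>((hend - hstart) * (wend - wstart)),
                       &ele);
      out[ph * OW + pw] = ele;
    }
  }
  return out;
}

int main() {
  // 112x112 input, 2x2 window, stride 2, no padding -> 56, matching the
  // {1, 64, 112, 112} -> {1, 64, 56, 56} shapes used by test_pool_op.cpp.
  std::printf("output side = %d\n",
              PoolOutputSize(112, 2, 0, 2, /*ceil_mode=*/false));

  // Tiny 4x4 plane pooled with a 2x2, stride-2 window.
  std::vector<float> in = {1, 2,  3,  4,  5,  6,  7,  8,
                           9, 10, 11, 12, 13, 14, 15, 16};
  std::vector<float> mx = NaivePool2D(in, 4, 4, 2, 2, 0, SimpleMaxPool{});
  std::vector<float> av = NaivePool2D(in, 4, 4, 2, 2, 0, SimpleAvgPool{});
  std::printf("max: %g %g %g %g\n", mx[0], mx[1], mx[2], mx[3]);  // 6 8 14 16
  std::printf("avg: %g %g %g %g\n", av[0], av[1], av[2], av[3]);  // 3.5 5.5 11.5 13.5
  return 0;
}

With a 2x2 window, stride 2 and no padding, a 112x112 plane maps to 56x56, which is exactly the {1, 64, 56, 56} output shape the pool2d test feeds to predict() for conv2d_0.tmp_1 -> pool2d_0.tmp_0.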