diff --git a/src/io.cpp b/src/io.cpp
index 417d6869a8d7f1f4df6b1186dd2c6bf5f47933ef..4627471c4679cee4c408bc2a005dd15e6f38530d 100644
--- a/src/io.cpp
+++ b/src/io.cpp
@@ -144,26 +144,25 @@ const framework::Program Loader::Load(
       std::make_shared<framework::Scope>();
   program.scope = scope;
-  // originProgramDesc->Block(0);
-
-  //  for (const auto &block : originProgramDesc->Blocks()) {
-  //    for (int i = 0; i < block->Vars().size(); ++i) {
-  //      std::shared_ptr<framework::VarDesc> var_desc = block->Vars()[i];
-  ////      auto var = scope->Var(var_desc->Name());
-  //      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
-  //        if (var_desc->Persistable() &&
-  //            var_desc->GetType() != framework::proto::VarType::FEED_MINIBATCH
-  //            && var_desc->GetType() != framework::proto::VarType::FETCH_LIST)
-  //            {
-  //          //          auto tensor = var->GetMutable<framework::LoDTensor>();
-  //          // to load
-  //          //          LoadVar(tensor, dirname + "/" + var_desc->Name());
-  //        }
-  //      } else {
-  //        // TODO(codeWorm): some.
-  //      }
-  //    }
-  //  }
-
+  originProgramDesc->Block(0);
+
+  // Walk every block and materialize its persistable LoDTensor variables in
+  // the scope, loading their contents from <dirname>/<var name>.
+  for (const auto &block : originProgramDesc->Blocks()) {
+    for (size_t i = 0; i < block->Vars().size(); ++i) {
+      std::shared_ptr<framework::VarDesc> var_desc = block->Vars()[i];
+      auto var = scope->Var(var_desc->Name());
+      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
+        // Feed/fetch variables are runtime I/O slots, not parameters on disk.
+        if (var_desc->Persistable() &&
+            var_desc->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
+            var_desc->GetType() != framework::proto::VarType::FETCH_LIST) {
+          auto tensor = var->GetMutable<framework::LoDTensor>();
+          LoadVar(tensor, dirname + "/" + var_desc->Name());
+        }
+      } else {
+        // TODO(codeWorm): handle non-LoDTensor variable types.
+      }
+    }
+  }
 
 #ifdef PADDLE_MOBILE_DEBUG
   for (const auto &block : program_desc_proto.blocks()) {
diff --git a/src/operators/kernel/arm/relu_kernel.cpp b/src/operators/kernel/arm/relu_kernel.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2441d453b9fa4e5423fd7087c14f7fce6cbaa825
--- /dev/null
+++ b/src/operators/kernel/arm/relu_kernel.cpp
@@ -0,0 +1,32 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "operators/kernel/relu_kernel.h"
+
+namespace paddle_mobile {
+namespace operators {
+
+// CPU/float specialization: elementwise out[i] = max(in[i], 0).
+template <>
+void ReluKernel<CPU, float>::Compute(const ReluParam &param) const {
+  const auto *input_x = param.InputX();
+  auto *input_x_ptr = input_x->data<float>();
+  auto *out = param.Out();
+  auto *out_ptr = out->mutable_data<float>();
+  for (int i = 0; i < input_x->numel(); i++) {
+    out_ptr[i] = input_x_ptr[i] > 0 ? input_x_ptr[i] : 0;
+  }
+}
+}  // namespace operators
+}  // namespace paddle_mobile
diff --git a/src/operators/kernel/relu_kernel.h b/src/operators/kernel/relu_kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..83b4548f3e5421657ae6f79bd226e16e1aba7ffb
--- /dev/null
+++ b/src/operators/kernel/relu_kernel.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "framework/operator.h"
+#include "operators/op_param.h"
+
+namespace paddle_mobile {
+namespace operators {
+
+// Generic relu kernel interface; per-device specializations define Compute.
+template <typename DeviceType, typename T>
+class ReluKernel : public framework::OpKernelBase<DeviceType, ReluParam> {
+ public:
+  void Compute(const ReluParam& param) const;
+};
+}  // namespace operators
+}  // namespace paddle_mobile
diff --git a/src/operators/op_param.h b/src/operators/op_param.h
index b9cbcd7e5ec0b5ee6d15addd4934611719c62245..c2f3a5e7cff010a752bb44133c02ebfe489c29a1 100644
--- a/src/operators/op_param.h
+++ b/src/operators/op_param.h
@@ -669,5 +669,25 @@ class ReshapeParam : public OpParam {
   vector<int> shape_;
   bool inplace_;
 };
+
+// Parameter pack for the relu op: resolves the input "X" and output "Out"
+// tensors from the scope once, at construction time.
+class ReluParam : public OpParam {
+ public:
+  ReluParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
+            const AttributeMap &attrs, const Scope &scope) {
+    input_x_ = InputXFrom<framework::LoDTensor>(inputs, scope);
+    out_ = OutFrom<framework::LoDTensor>(outputs, scope);
+  }
+
+  const Tensor *InputX() const { return input_x_; }
+
+  Tensor *Out() const { return out_; }
+
+ private:
+  Tensor *input_x_;
+  Tensor *out_;
+};
+
 }  // namespace operators
 }  // namespace paddle_mobile
diff --git a/src/operators/relu_op.cpp b/src/operators/relu_op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5f861579ab47f09b55f8d255103558a5209fedb9
--- /dev/null
+++ b/src/operators/relu_op.cpp
@@ -0,0 +1,33 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "operators/relu_op.h"
+
+namespace paddle_mobile {
+namespace operators {
+
+// Relu is shape-preserving: the output dims equal the input dims.
+template <typename DeviceType, typename T>
+void ReluOp<DeviceType, T>::InferShape() const {
+  auto input_dims = param_.InputX()->dims();
+  param_.Out()->Resize(input_dims);
+}
+
+template class ReluOp<CPU, float>;
+}  // namespace operators
+}  // namespace paddle_mobile
+
+namespace ops = paddle_mobile::operators;
+USE_OP(relu);
+REGISTER_OPERATOR(relu, ops::ReluOp);
diff --git a/src/operators/relu_op.h b/src/operators/relu_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..26bee848c1b44c53b73fbe58e6cbe45e95d91a1e
--- /dev/null
+++ b/src/operators/relu_op.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include <string>
+
+#include "framework/operator.h"
+#include "operators/kernel/relu_kernel.h"
+#include "operators/op_param.h"
+
+namespace paddle_mobile {
+namespace operators {
+
+using paddle_mobile::framework::Tensor;
+
+// Relu operator: wires a ReluParam to the device-specific ReluKernel.
+template <typename DeviceType, typename T>
+class ReluOp : public framework::OperatorWithKernel<DeviceType> {
+ public:
+  ReluOp(const std::string &type, const VariableNameMap &inputs,
+         const VariableNameMap &outputs, const framework::AttributeMap &attrs,
+         std::shared_ptr<framework::Scope> scope)
+      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
+                                                  attrs, scope),
+        param_(inputs, outputs, attrs, *scope) {}
+
+  void Run() const {
+    operators::ReluKernel<DeviceType, T> kernel;
+    kernel.Compute(param_);
+  }
+
+  void InferShape() const override;
+
+ protected:
+  ReluParam param_;
+};
+
+}  // namespace operators
+}  // namespace paddle_mobile
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index e29348c917c1390a9e9357bca2eb4dafe1db6291..39a580b73de985eb265debef20c07fcb32e4378b 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -45,6 +45,9 @@ target_link_libraries(test-multiclassnms-op paddle-mobile)
 # gen test
 ADD_EXECUTABLE(test-reshape-op operators/test_reshape_op.cpp test_helper.h test_include.h)
 target_link_libraries(test-reshape-op paddle-mobile)
+# gen test
+ADD_EXECUTABLE(test-relu-op operators/test_relu_op.cpp test_helper.h test_include.h)
+target_link_libraries(test-relu-op paddle-mobile)
 
 # gen test log
 ADD_EXECUTABLE(test-log common/test_log.cpp)
diff --git a/test/operators/test_relu_op.cpp b/test/operators/test_relu_op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6c2084f8c89cb7026e717bba522d8223dbea9e95
--- /dev/null
+++ b/test/operators/test_relu_op.cpp
@@ -0,0 +1,115 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "../test_include.h"
+#include "operators/relu_op.h"
+
+namespace paddle_mobile {
+namespace framework {
+
+// Drives a single relu op extracted from a loaded program.
+// NOTE(review): assumes the model contains a relu fed by
+// "batch_norm_34.tmp_2" — specific to the mobilenet+ssd test model.
+template <typename DeviceType>
+class TestReluOp {
+ public:
+  explicit TestReluOp(const Program<DeviceType> p) : program_(p) {
+    if (use_optimize_) {
+      to_predict_program_ = program_.optimizeProgram;
+    } else {
+      to_predict_program_ = program_.originProgram;
+    }
+
+    const std::vector<std::shared_ptr<BlockDesc>> blocks =
+        to_predict_program_->Blocks();
+    for (auto block_desc : blocks) {
+      std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
+      for (auto op : ops) {
+        if (op->Type() == "relu" &&
+            op->Input("X")[0] == "batch_norm_34.tmp_2") {
+          DLOG << "in";
+          auto test_op = std::make_shared<operators::ReluOp<DeviceType, float>>(
+              op->Type(), op->GetInputs(), op->GetOutputs(),
+              op->GetAttrMap(), program_.scope);
+          ops_of_block_[*block_desc.get()].push_back(test_op);
+        }
+      }
+    }
+  }
+
+  // Feeds t1 as the relu input, runs block 0, and returns the output tensor.
+  std::shared_ptr<Tensor> predict(const Tensor &t1) {
+    auto scope = program_.scope;
+    Variable *x1_feed_value = scope->Var("batch_norm_34.tmp_2");
+    auto tensor_x1 = x1_feed_value->GetMutable<Tensor>();
+    tensor_x1->ShareDataWith(t1);
+
+    Variable *output = scope->Var("batch_norm_34.tmp_3");
+    auto *output_tensor = output->GetMutable<Tensor>();
+    output_tensor->mutable_data<float>({1, 2, 3, 4});
+
+    // output_tensor is owned by the scope; hand back a non-owning handle
+    // (no-op deleter) so the shared_ptr cannot double-free it.
+    std::shared_ptr<Tensor> out_tensor(output_tensor, [](Tensor *) {});
+
+    predict(t1, 0);
+
+    return out_tensor;
+  }
+
+ private:
+  const framework::Program<DeviceType> program_;
+  std::shared_ptr<ProgramDesc> to_predict_program_;
+  std::map<framework::BlockDesc,
+           std::vector<std::shared_ptr<operators::ReluOp<DeviceType, float>>>>
+      ops_of_block_;
+  bool use_optimize_ = false;
+
+  // Runs every collected op of the given block in order.
+  void predict(const Tensor &t1, int block_id) {
+    std::shared_ptr<BlockDesc> to_predict_block =
+        to_predict_program_->Block(block_id);
+    for (size_t j = 0; j < ops_of_block_[*to_predict_block.get()].size();
+         ++j) {
+      auto op = ops_of_block_[*to_predict_block.get()][j];
+      DLOG << "op -> run()";
+      op->Run();
+    }
+  }
+};
+
+template class TestReluOp<CPU>;
+}  // namespace framework
+}  // namespace paddle_mobile
+
+int main() {
+  DLOG << "----------**********----------";
+  DLOG << "begin to run Relu Test";
+  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  auto program = loader.Load(std::string("../../test/models/mobilenet+ssd"));
+
+  /// input x (1, 2, 3, 4)
+  paddle_mobile::framework::Tensor inputx1;
+  SetupTensor<float>(&inputx1, {1, 2, 3, 4}, static_cast<float>(-1),
+                     static_cast<float>(1));
+
+  paddle_mobile::framework::TestReluOp<paddle_mobile::CPU> testReluOp(program);
+
+  auto output = testReluOp.predict(inputx1);
+  auto *output_ptr = output->data<float>();
+
+  for (int i = 0; i < output->numel(); i++) {
+    DLOG << output_ptr[i];
+  }
+  return 0;
+}