diff --git a/src/common/types.h b/src/common/types.h
index 252c747d75fedd1dc02f8aff300c4b265cfdeb15..b25ae329931a79e9dd73b4a213a86aacc2464787 100644
--- a/src/common/types.h
+++ b/src/common/types.h
@@ -15,8 +15,8 @@ limitations under the License. */
 #pragma once;
 
 #include 
-#include 
 #include 
+#include 
 
 namespace paddle_mobile {
 enum class Precision : int { FP32 = 0 };
@@ -94,18 +94,18 @@ static const std::string G_OP_TYPE_FEED = "feed";
 static const std::string G_OP_TYPE_FETCH = "fetch";
 
 static std::unordered_map<
-    std::string, std::pair, std::vector>>
-    op_input_output_key = {{G_OP_TYPE_CONV, {{"Input"}, {"Output"}}},
-                           {G_OP_TYPE_RELU, {{"X"}, {"Out"}}},
-                           {G_OP_TYPE_SOFTMAX, {{"X"}, {"Out"}}},
-                           {G_OP_TYPE_MUL, {{"X"}, {"Out"}}},
-                           {G_OP_TYPE_ELEMENTWISE_ADD, {{"X", "Y"}, {"Out"}}},
-                           {G_OP_TYPE_POOL2D, {{"X"}, {"Out"}}},
-                           {G_OP_TYPE_BATCHNORM, {{"X"}, {"Y"}}},
-                           {G_OP_TYPE_LRN, {{"X"}, {"Out"}}},
-                           {G_OP_TYPE_CONCAT, {{"X"}, {"Out"}}},
-                           {G_OP_TYPE_SPLIT, {{"X"}, {"Out"}}},
-                           {G_OP_TYPE_FEED, {{"X"}, {"Out"}}},
-                           {G_OP_TYPE_FETCH, {{"X"}, {"Out"}}}};
+    std::string, std::pair, std::vector>>
+    op_input_output_key = {{G_OP_TYPE_CONV, {{"Input"}, {"Output"}}},
+                           {G_OP_TYPE_RELU, {{"X"}, {"Out"}}},
+                           {G_OP_TYPE_SOFTMAX, {{"X"}, {"Out"}}},
+                           {G_OP_TYPE_MUL, {{"X"}, {"Out"}}},
+                           {G_OP_TYPE_ELEMENTWISE_ADD, {{"X", "Y"}, {"Out"}}},
+                           {G_OP_TYPE_POOL2D, {{"X"}, {"Out"}}},
+                           {G_OP_TYPE_BATCHNORM, {{"X"}, {"Y"}}},
+                           {G_OP_TYPE_LRN, {{"X"}, {"Out"}}},
+                           {G_OP_TYPE_CONCAT, {{"X"}, {"Out"}}},
+                           {G_OP_TYPE_SPLIT, {{"X"}, {"Out"}}},
+                           {G_OP_TYPE_FEED, {{"X"}, {"Out"}}},
+                           {G_OP_TYPE_FETCH, {{"X"}, {"Out"}}}};
 
 }  // namespace paddle_mobile
diff --git a/src/framework/operator.h b/src/framework/operator.h
index e9dc6f6fb75df9be0e56cc599f61bb14f2788795..8e5e55fb469d65212795d13bf17ec02de27c2db1 100644
--- a/src/framework/operator.h
+++ b/src/framework/operator.h
@@ -19,20 +19,20 @@ limitations under the License.
 */
 #include 
 #include 
-#include "common/types.h"
 #include "common/enforce.h"
-#include "common/variant.h"
-#include "framework/scope.h"
-#include "framework/tensor.h"
-#include "framework/op_info.h"
 #include "common/type_define.h"
-#include "framework/variable.h"
+#include "common/types.h"
+#include "common/variant.h"
 #include "framework/attribute.h"
-#include "framework/op_registry.h"
+#include "framework/op_info.h"
 #include "framework/op_kernel_type.h"
-#include "framework/program/block_desc.h"
+#include "framework/op_registry.h"
 #include "framework/paddle_mobile_object.h"
+#include "framework/program/block_desc.h"
 #include "framework/program/program-optimize/node.h"
+#include "framework/scope.h"
+#include "framework/tensor.h"
+#include "framework/variable.h"
 
 namespace paddle_mobile {
 namespace framework {
@@ -77,6 +77,7 @@ class OperatorBase : PaddleMobileObject {
    *     @b 根据输入形状和参数计算出输出形状
    * */
   virtual void InferShape() const = 0;
+
  protected:
   std::shared_ptr scope_;
   std::string type_;
diff --git a/src/framework/program/program-optimize/node.cpp b/src/framework/program/program-optimize/node.cpp
index f260fd0b61f568a36278a5585af3352065336a56..820fa6a443c62c4cfdb38f4d42e6d7805371c2d3 100644
--- a/src/framework/program/program-optimize/node.cpp
+++ b/src/framework/program/program-optimize/node.cpp
@@ -82,11 +82,14 @@ void Node::OpDescs(std::vector> *op_desc,
       DLOG << "当前 op desc 输出数不为 1 ";
       can_add_split = false;
     }
-    for (const auto& output : outputs_) {
-      if (op_input_output_key.find(output->op_desc_->type_) != op_input_output_key.end()) {
+    for (const auto &output : outputs_) {
+      if (op_input_output_key.find(output->op_desc_->type_) !=
+          op_input_output_key.end()) {
         auto inputs_and_outputs = op_input_output_key[output->op_desc_->type_];
-        auto outputs_of_output = output->op_desc_->Output(inputs_and_outputs.second[0]);
-        auto inputs_of_output = output->op_desc_->Input(inputs_and_outputs.first[0]);
+        auto outputs_of_output =
+            output->op_desc_->Output(inputs_and_outputs.second[0]);
+        auto inputs_of_output =
+            output->op_desc_->Input(inputs_and_outputs.first[0]);
         for (int i = 0; i < inputs_of_output.size(); ++i) {
           std::string input_of_output = inputs_of_output[i];
           for (int j = 0; j < outputs_of_output.size(); ++j) {
@@ -121,13 +124,17 @@ void Node::OpDescs(std::vector> *op_desc,
 
   if (can_add_split) {
     adding_thread = true;
-    std::shared_ptr split_op_desc = std::make_shared();
+    std::shared_ptr split_op_desc =
+        std::make_shared();
     split_op_desc->type_ = G_OP_TYPE_SPLIT;
-    auto outputs = this->op_desc_->Output(op_input_output_key[this->op_desc_->Type()].second[0]);
-
-    split_op_desc->inputs_ = {{op_input_output_key[G_OP_TYPE_SPLIT].first[0], outputs}};
-    auto &split_outputs = split_op_desc->outputs_[op_input_output_key[G_OP_TYPE_SPLIT].second[0]];
-    for (const auto& output : outputs_) {
+    auto outputs = this->op_desc_->Output(
+        op_input_output_key[this->op_desc_->Type()].second[0]);
+
+    split_op_desc->inputs_ = {
+        {op_input_output_key[G_OP_TYPE_SPLIT].first[0], outputs}};
+    auto &split_outputs =
+        split_op_desc->outputs_[op_input_output_key[G_OP_TYPE_SPLIT].second[0]];
+    for (const auto &output : outputs_) {
       split_outputs.push_back(outputs[0]);
     }
     DLOG << "add split";
diff --git a/src/framework/program/program-optimize/program_optimize.cpp b/src/framework/program/program-optimize/program_optimize.cpp
index cd6899efe365440057a89b3d7222a880ecf7608c..737fed9bd56bdec92774ba364e035ba581258e57 100644
--- a/src/framework/program/program-optimize/program_optimize.cpp
+++ b/src/framework/program/program-optimize/program_optimize.cpp
@@ -19,7 +19,7 @@
 namespace paddle_mobile {
 namespace framework {
 
-//std::shared_ptr ProgramOptimize::Optimize() {}
+// std::shared_ptr ProgramOptimize::Optimize() {}
 
 std::shared_ptr ProgramOptimize::FushionOptimize(
     std::shared_ptr ori_des) {
diff --git a/src/io.cpp b/src/io.cpp
index 23b3e21ee811d732789399547e9255285659806a..002e73b79648320c229786f8492f4c0e8b299d83 100644
--- a/src/io.cpp
+++ b/src/io.cpp
@@ -18,15 +18,14 @@ limitations under the License. */
 #include "common/log.h"
 #include "common/enforce.h"
-#include "common/enforce.h"
-#include "framework/scope.h"
-#include "framework/tensor.h"
-#include "framework/operator.h"
-#include "framework/lod_tensor.h"
 #include "framework/framework.pb-c.h"
-#include "framework/program/var_desc.h"
-#include "framework/program/program_desc.h"
+#include "framework/lod_tensor.h"
+#include "framework/operator.h"
 #include "framework/program/program-optimize/program_optimize.h"
+#include "framework/program/program_desc.h"
+#include "framework/program/var_desc.h"
+#include "framework/scope.h"
+#include "framework/tensor.h"
 
 namespace paddle_mobile {
 using framework::Variable;
@@ -202,7 +201,6 @@ const framework::Program Loader::Load(
       // DLOG << "var name-- " << var_desc->Name();
       auto var = scope->Var(var_desc->Name());
-
       if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
         if (var_desc->Persistable() &&
             var_desc->Type() != framework::VARTYPE_TYPE_FEED_MINIBATCH &&
@@ -226,7 +224,8 @@ const framework::Program Loader::Load(
 
   if (optimize) {
     framework::ProgramOptimize program_optimize;
-    program.optimizeProgram = program_optimize.FushionOptimize(originProgramDesc);
+    program.optimizeProgram =
+        program_optimize.FushionOptimize(originProgramDesc);
   }
 
   paddle_mobile__framework__proto__program_desc__free_unpacked(c_program, NULL);
@@ -238,7 +237,8 @@ template class Loader;
 
 #pragma mark - executor
 template 
-Executor::Executor(const framework::Program p, int batch_size, bool use_optimize)
+Executor::Executor(const framework::Program p, int batch_size,
+                   bool use_optimize)
     : program_(p), batch_size_(batch_size), use_optimize_(use_optimize) {
   if (use_optimize_) {
     to_predict_program_ = program_.optimizeProgram;
diff --git a/src/io.h b/src/io.h
index 8a73beba6d82fb794022c65cd23d389c16465a5e..de2d359bf58d1ad328defd2f51e87e2d6bfe6295 100644
--- a/src/io.h
+++ b/src/io.h
@@ -30,7 +30,8 @@ namespace paddle_mobile {
 template 
 class Loader : PaddleMobileObject {
  public:
-  const framework::Program Load(const std::string &dirname, bool optimize = true);
+  const framework::Program Load(const std::string &dirname,
+                                bool optimize = true);
 
  private:
   void LoadVar(framework::Variable *variable,
@@ -45,7 +46,8 @@ class Executor {
   Executor() = default;
 
-  Executor(const framework::Program p, int batch_size = 1, bool use_optimize = true);
+  Executor(const framework::Program p, int batch_size = 1,
+           bool use_optimize = true);
 
   // std::shared_ptr Predict(framework::Tensor &t);
diff --git a/src/operators/fusion_conv_add_relu_op.h b/src/operators/fusion_conv_add_relu_op.h
index cab55dc36173168c144a54afa5944c286af2593b..1fa3399cf22df76b429d89fa89b0cb620257271f 100644
--- a/src/operators/fusion_conv_add_relu_op.h
+++ b/src/operators/fusion_conv_add_relu_op.h
@@ -31,7 +31,8 @@ class FushionConvAddReluOpMatcher : public framework::FusionOpMatcher {
   void FolderNodes(framework::Node &node) {
     std::vector> origin_descs =
         node.OpDescs(node_.Depth());
-    node.Folder(node_.Depth(), Type(), {{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}});
+    node.Folder(node_.Depth(), Type(),
+                {{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}});
   }
   std::string Type() { return G_OP_TYPE_FUSION_CONV_ADD_RELU; }
 };
diff --git a/src/operators/fusion_fc_op.h b/src/operators/fusion_fc_op.h
index fd6f2658fd1c4e98b1b214b0c5f70fefb4d18a71..fb49fa61b202401871b8c6c18e51b15ab42dc1e4 100644
--- a/src/operators/fusion_fc_op.h
+++ b/src/operators/fusion_fc_op.h
@@ -35,7 +35,8 @@ class FusionFcMatcher : public framework::FusionOpMatcher {
   void FolderNodes(framework::Node &node) {
     vector> origin_descs =
         node.OpDescs(node_.Depth());
-    node.Folder(node_.Depth(), Type(), {{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}});
+    node.Folder(node_.Depth(), Type(),
+                {{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}});
   }
 
   std::string Type() { return G_OP_TYPE_FC; }
diff --git a/src/operators/kernel/arm/relu_kernel.cpp b/src/operators/kernel/arm/relu_kernel.cpp
index 96fcb7c30884c1e7163845e3d483f9cf6614a03a..586d981175184e2da03f2949390932b888d67f4a 100644
--- a/src/operators/kernel/arm/relu_kernel.cpp
+++ b/src/operators/kernel/arm/relu_kernel.cpp
@@ -20,7 +20,6 @@ limitations under the License. */
 
 namespace paddle_mobile {
 namespace operators {
-
 template 
 struct ReluFunctor {
   inline T operator()(T in) const { return in > 0 ? in : 0; }
diff --git a/src/operators/relu_op.cpp b/src/operators/relu_op.cpp
index 35791b28845063723f1f0f8fc1753ddeae348756..21bcc605282ffc590025e87b609cccc855a631d1 100644
--- a/src/operators/relu_op.cpp
+++ b/src/operators/relu_op.cpp
@@ -27,7 +27,8 @@ template class ReluOp;
 
 /*
  * @b 每一个 op 都需要注册一下的,
- * USE_OP的参数 和 REGISTER_OPERATOR的第一个参数 都是需要和model中类型对应起来的
+ * USE_OP的参数 和 REGISTER_OPERATOR的第一个参数
+ * 都是需要和model中类型对应起来的
 * */
 namespace ops = paddle_mobile::operators;
 USE_OP(relu);
diff --git a/src/operators/relu_op.h b/src/operators/relu_op.h
index aed907e0f871fd73326e05035090f744e3ba6cdc..7be8cd249cb22255dff237da6c8653e6237bbc3f 100644
--- a/src/operators/relu_op.h
+++ b/src/operators/relu_op.h
@@ -38,7 +38,7 @@ class ReluOp : public framework::OperatorWithKernel {
             scope),
         param_(inputs, outputs, attrs, *scope) {}
 
-  /*
+  /*
   * @b op 进行运算, 调用相应的 kernel 进行运算
   * */
   void RunImpl() const {
diff --git a/test/executor_for_test.h b/test/executor_for_test.h
index 2893eccd80e141f479c903b36831f9e9476f052d..ce3c84e986eb7ef5e9602209cedb3dbabbf06e85 100644
--- a/test/executor_for_test.h
+++ b/test/executor_for_test.h
@@ -17,9 +17,9 @@ limitations under the License. */
 #include 
 #include 
 
-#include "io.h"
 #include "common/log.h"
 #include "framework/op_registry.h"
+#include "io.h"
 #include "operators/conv_op.h"
 #include "operators/elementwise_add_op.h"
 #include "operators/pool_op.h"
diff --git a/test/framework/test_load.cpp b/test/framework/test_load.cpp
index 19871f9555708aa6a5c6c23532345e2053f3c7d1..95357547e1b93d3060481b55eaf46c919496785d 100644
--- a/test/framework/test_load.cpp
+++ b/test/framework/test_load.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "io.h"
 #include "../test_helper.h"
+#include "io.h"
 
 int main() {
   paddle_mobile::Loader loader;
diff --git a/test/framework/test_optimize.cpp b/test/framework/test_optimize.cpp
index b371fb63b9310e9b057322d8d8903dca8afda944..f0392cfec02c8ea764cd3d6dc9f50b2415c39e2c 100644
--- a/test/framework/test_optimize.cpp
+++ b/test/framework/test_optimize.cpp
@@ -12,17 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "io.h"
 #include "../test_helper.h"
 #include "framework/program/program-optimize/node.h"
 #include "framework/program/program-optimize/program_optimize.h"
+#include "io.h"
 
 int main() {
   paddle_mobile::Loader loader;
   // "../../../test/models/googlenet"
   auto program = loader.Load(g_googlenet);
   paddle_mobile::framework::ProgramOptimize optimize;
-// program.originProgram->Description("origin");
+  // program.originProgram->Description("origin");
   auto optimize_program = optimize.FushionOptimize(program.originProgram);
   if (optimize_program != nullptr) {
     optimize_program->Description("optimize");
diff --git a/test/operators/test_batchnorm_op.cpp b/test/operators/test_batchnorm_op.cpp
index 0acd6ea57267cb859f40ed1056f93b793e0e780d..38d9f624909fd645c78ae56a5d9efff9fa961795 100644
--- a/test/operators/test_batchnorm_op.cpp
+++ b/test/operators/test_batchnorm_op.cpp
@@ -128,8 +128,7 @@ int main() {
   DLOG << "----------**********----------";
   DLOG << "begin to run BatchNormOp Test";
   paddle_mobile::Loader loader;
-  auto program = loader.Load(std::string(
-      g_resnet));
+  auto program = loader.Load(std::string(g_resnet));
 
   /// input x (4,10,2,2)
   paddle_mobile::framework::Tensor inputx1;
diff --git a/test/test_helper.h b/test/test_helper.h
index 0fec49e4e92e994f0b988b6f2fcfc4b2dfa81553..dba4dec9bbc0a8066eef6c6dea9828dfb9954200 100644
--- a/test/test_helper.h
+++ b/test/test_helper.h
@@ -15,8 +15,8 @@ limitations under the License. */
 #pragma once
 
 #include 
-#include 
 #include 
+#include 
 
 #include "common/log.h"
 #include "framework/ddim.h"
diff --git a/test/test_include.h b/test/test_include.h
index 0046bdb4e415c87b0462b7c2e3d243fa194e3948..25efbb9f4c00921495a5ab054acdde329c4ef58a 100644
--- a/test/test_include.h
+++ b/test/test_include.h
@@ -20,7 +20,6 @@ limitations under the License. */
 
 #include "./test_helper.h"
 #include "common/enforce.h"
-#include "io.h"
 #include "common/log.h"
 #include "framework/lod_tensor.h"
 #include "framework/operator.h"
@@ -30,3 +29,4 @@ limitations under the License. */
 #include "framework/scope.h"
 #include "framework/tensor.h"
 #include "framework/variable.h"
+#include "io.h"