diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5635dff9ef0d98f43510d1fe8b219c18e56cd2a1..5b6de21e060052f4cf9770a6ba7bde70196b5c0e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,8 +1,13 @@
 cmake_minimum_required(VERSION 3.0)
 project(paddle-mobile)
-#add_definitions(-DPADDLE_MOBILE_DEBUG)
+add_definitions(-DPADDLE_MOBILE_DEBUG)
 add_definitions(-DENABLE_EXCEPTION)
+#add_definitions(-DARMV7)
+#add_definitions(-DARMV8)
+#add_definitions(-DIOS)
+add_definitions(-DX86)
+
 
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
 set(CMAKE_BUILD_TYPE RelWithDebInfo)
 set(CMAKE_VERBOSE_MAKEFILE ON)
@@ -16,6 +21,75 @@ file(GLOB_RECURSE PADDLE_MOBILE_H src/*.h)
 
 include_directories(src/)
 
+if (googlenet)
+    add_definitions(-DCONCAT_OP)
+    add_definitions(-DCONV_OP)
+    add_definitions(-DLRN_OP)
+    add_definitions(-DMUL_OP)
+    add_definitions(-DELEMENTWISEADD_OP)
+    add_definitions(-DFUSION_FC_OP)
+    add_definitions(-DPOOL_OP)
+    add_definitions(-DRELU_OP)
+elseif (mobilenet)
+    add_definitions(-DCONV_OP)
+    add_definitions(-DELEMENTWISEADD_OP)
+    add_definitions(-DRELU_OP)
+    add_definitions(-DSOFTMAX_OP)
+    add_definitions(-DSOFTMAX_OP)
+    add_definitions(-DDEPTHWISECONV_OP)
+    add_definitions(-DBATCHNORM_OP)
+    add_definitions(-DPOOL_OP)
+    add_definitions(-DRESHAPE_OP)
+elseif (yolo)
+    add_definitions(-DBATCHNORM_OP)
+    add_definitions(-DCONV_OP)
+    add_definitions(-DRELU_OP)
+    add_definitions(-DELEMENTWISEADD_OP)
+elseif (squeezenet)
+    add_definitions(-DCONCAT_OP)
+    add_definitions(-DCONV_OP)
+    add_definitions(-DRELU_OP)
+    add_definitions(-DELEMENTWISEADD_OP)
+    add_definitions(-DPOOL_OP)
+    add_definitions(-DRESHAPE_OP)
+    add_definitions(-DSOFTMAX_OP)
+elseif(resnet)
+    add_definitions(-DCONV_OP)
+    add_definitions(-DBATCHNORM_OP)
+    add_definitions(-DELEMENTWISEADD_OP)
+    add_definitions(-DSOFTMAX_OP)
+    add_definitions(-DMUL_OP)
+    add_definitions(-DPOOL_OP)
+    add_definitions(-DRELU_OP)
+else ()
+    add_definitions(-DBATCHNORM_OP)
+    add_definitions(-DBOXCODER_OP)
+    add_definitions(-DCONCAT_OP)
+    add_definitions(-DCONV_OP)
+    add_definitions(-DDEPTHWISECONV_OP)
+    add_definitions(-DELEMENTWISEADD_OP)
+    add_definitions(-DFUSIONCONVADD_OP)
+    add_definitions(-DCONVADDRELU_OP)
+    add_definitions(-DFUSION_FC_OP)
+    add_definitions(-DLRN_OP)
+    add_definitions(-DMUL_OP)
+    add_definitions(-DMULTICLASSNMS_OP)
+    add_definitions(-DPOOL_OP)
+    add_definitions(-DPRIORBOX_OP)
+    add_definitions(-DRELU_OP)
+    add_definitions(-DRESHAPE_OP)
+    add_definitions(-DSIGMOID_OP)
+    add_definitions(-DSOFTMAX_OP)
+    add_definitions(-DTRANSPOSE_OP)
+endif()
+
 add_library(paddle-mobile SHARED ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
 
+if (googlenet)
+elseif (mobilenet)
+elseif (yolo)
+elseif (squeezenet)
+elseif(resnet)
+else ()
+endif()
 add_subdirectory(test)
diff --git a/build.sh b/build.sh
index d9c462ca7c218a67964172fa7970a765ffea764d..7ef05924d94315d1c55abc944479e9a6c6f6125f 100755
--- a/build.sh
+++ b/build.sh
@@ -33,6 +33,8 @@ build_for_mac() {
 }
 
 build_for_android() {
+
+
     if [ -z "${ANDROID_NDK}" ]; then
         echo "ANDROID_NDK not found!"
         exit -1
@@ -55,11 +57,13 @@ build_for_android() {
         exit -1
     fi
 
+    MODE="Release"
     ANDROID_PLATFORM_VERSION="android-15"
     TOOLCHAIN_FILE="./tools/android-cmake/android.toolchain.cmake"
     ANDROID_ARM_MODE="arm"
 
-
+    if [ $# -eq 1 ]; then
+    NET=$1
     cmake . \
         -B"build/release/${PLATFORM}" \
         -DANDROID_ABI="${ABI}" \
@@ -69,10 +73,24 @@
         -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
         -DANDROID_STL=c++_static \
         -DANDROID=true \
+        -D"${NET}=true" \
         -D"${ARM_PLATFORM}"=true
+    else
+    cmake . \
+        -B"build/release/${PLATFORM}" \
+        -DANDROID_ABI="${ABI}" \
+        -DCMAKE_BUILD_TYPE="${MODE}" \
+        -DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
+        -DANDROID_PLATFORM="${ANDROID_PLATFORM_VERSION}" \
+        -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
+        -DANDROID_STL=c++_static \
+        -DANDROID=true \
+        -D"${ARM_PLATFORM}"=true
+    fi
     cd "./build/release/${PLATFORM}"
     make -j 8
+
 }
 
 build_for_ios() {
@@ -106,15 +124,44 @@ if [ $# -lt 1 ]; then
     echo "available targets: mac|linux|ios|android"
     echo "sample usage: ./build.sh mac"
 else
-    if [ $1 = "mac" ]; then
-        build_for_mac
-    elif [ $1 = "linux" ]; then
-        build_for_linux
-    elif [ $1 = "android" ]; then
-        build_for_android
-    elif [ $1 = "ios" ]; then
-        build_for_ios
-    else
-        build_error
+    if [ $# -eq 2 ]; then
+
+        if [[$2 != "googlenet"]] -a [[$2 != "mobilenet"]] -a [[$2 != "yolo"]] -a [[$2 != "squeezenet"]] -a [[$2 != "resnet"]]; then
+            if [ $1 = "mac" ]; then
+                build_for_mac
+            elif [ $1 = "linux" ]; then
+                build_for_linux
+            elif [ $1 = "android" ]; then
+                build_for_android
+            elif [ $1 = "ios" ]; then
+                build_for_ios
+            else
+                build_error
+            fi
+        else
+            if [ $1 = "mac" ]; then
+                build_for_mac $2
+            elif [ $1 = "linux" ]; then
+                build_for_linux $2
+            elif [ $1 = "android" ]; then
+                build_for_android $2
+            elif [ $1 = "ios" ]; then
+                build_for_ios $2
+            else
+                build_error
+            fi
+        fi
+    else
+        if [ $1 = "mac" ]; then
+            build_for_mac
+        elif [ $1 = "linux" ]; then
+            build_for_linux
+        elif [ $1 = "android" ]; then
+            build_for_android
+        elif [ $1 = "ios" ]; then
+            build_for_ios
+        else
+            build_error
+        fi
     fi
 fi
diff --git a/src/common/enforce.h b/src/common/enforce.h
index 52bda2258a00c7444762fe8297380c1c7752dd42..4b7c8dc0e267bc2862d2665e71085d679dceb1ff 100644
--- a/src/common/enforce.h
+++ b/src/common/enforce.h
@@ -18,7 +18,6 @@ limitations under the License. */
 #include
 #include
 #include
-#include
 #include
 
 #endif
diff --git a/src/framework/attribute.cpp b/src/framework/attribute.cpp
index 01b0ed523c2ccf125c4bb81d3d50ff5e4b289c7e..8b150f4e9e6aa3ccc30f13f661ff9cd6be79ae7a 100644
--- a/src/framework/attribute.cpp
+++ b/src/framework/attribute.cpp
@@ -17,14 +17,8 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace framework {
 
-/*
- * Variant, std::vector,
-   std::vector, bool, std::vector, BlockDesc *,
-   int64_t>
- * */
-
 struct PrintVistor : Vistor<Print &> {
-  PrintVistor(Print &printer) : printer_(printer) {}
+  explicit PrintVistor(Print &printer) : printer_(printer) {}
   template <typename T>
   Print &operator()(const T &value) {
     printer_ << value;
diff --git a/src/framework/attribute.h b/src/framework/attribute.h
index b77d94521e8be9bdfdfd00ca1628bdefc60d688d..7db107ceef0d8fa293184bbc01f10ec687984605 100644
--- a/src/framework/attribute.h
+++ b/src/framework/attribute.h
@@ -14,7 +14,9 @@ limitations under the License. */
 
 #pragma once
 
+#include
 #include
+#include
 #include "common/enforce.h"
 #include "common/log.h"
 #include "common/variant.h"
@@ -22,28 +24,15 @@ limitations under the License.
*/ namespace paddle_mobile { namespace framework { +using std::string; +using std::vector; class BlockDesc; class Attribute { public: - /* - * PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INT = 0, - PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOAT = 1, - PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRING = 2, - PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INTS = 3, - PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOATS = 4, - PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRINGS = 5, - PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEAN = 6, - PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEANS = 7, - PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BLOCK = 8, - PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__LONG = 9 - PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE) - * - * */ static Attribute GetAttrValue( PaddleMobile__Framework__Proto__OpDesc__Attr *attr_desc) { - // std::cout << "begin get attr value" << std::endl; Attribute attr; switch (attr_desc->type) { case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEAN: { @@ -63,35 +52,35 @@ class Attribute { break; } case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEANS: { - std::vector val(attr_desc->n_bools); + vector val(attr_desc->n_bools); for (int i = 0; i < attr_desc->n_bools; ++i) { val[i] = attr_desc->bools[i]; } - attr.Set>(val); + attr.Set>(val); break; } case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INTS: { - std::vector val(attr_desc->n_ints); + vector val(attr_desc->n_ints); for (int i = 0; i < attr_desc->n_ints; ++i) { val[i] = attr_desc->ints[i]; } - attr.Set>(val); + attr.Set>(val); break; } case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOATS: { - std::vector val(attr_desc->n_floats); + vector val(attr_desc->n_floats); for (int i = 0; i < attr_desc->n_floats; ++i) { val[i] = attr_desc->floats[i]; } - attr.Set>(val); + attr.Set>(val); break; } case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRINGS: { - std::vector val(attr_desc->n_strings); + vector val(attr_desc->n_strings); for (int i = 0; i < attr_desc->n_strings; ++i) { val[i] = attr_desc->strings[i]; } - attr.Set>(val); + attr.Set>(val); break; } case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__LONG: { @@ -122,21 +111,18 @@ class Attribute { return vistor(attr.variant_.Get()); } else if (attr.variant_.TypeId() == typeid(float).hash_code()) { return vistor(attr.variant_.Get()); - } else if (attr.variant_.TypeId() == typeid(std::string).hash_code()) { - return vistor(attr.variant_.Get()); - } else if (attr.variant_.TypeId() == typeid(std::vector).hash_code()) { - return vistor(attr.variant_.Get>()); - } else if (attr.variant_.TypeId() == - typeid(std::vector).hash_code()) { - return vistor(attr.variant_.Get>()); - } else if (attr.variant_.TypeId() == - typeid(std::vector).hash_code()) { - return vistor(attr.variant_.Get>()); + } else if (attr.variant_.TypeId() == typeid(string).hash_code()) { + return vistor(attr.variant_.Get()); + } else if (attr.variant_.TypeId() == typeid(vector).hash_code()) { + return vistor(attr.variant_.Get>()); + } else if (attr.variant_.TypeId() == typeid(vector).hash_code()) { + return vistor(attr.variant_.Get>()); + } else if (attr.variant_.TypeId() == typeid(vector).hash_code()) { + return vistor(attr.variant_.Get>()); } else if (attr.variant_.TypeId() == typeid(bool).hash_code()) { return vistor(attr.variant_.Get()); - } else if (attr.variant_.TypeId() == - typeid(std::vector).hash_code()) { - return vistor(attr.variant_.Get>()); + } else if (attr.variant_.TypeId() == typeid(vector).hash_code()) { + return 
vistor(attr.variant_.Get>()); } else if (attr.variant_.TypeId() == typeid(int64_t).hash_code()) { return vistor(attr.variant_.Get()); } else { @@ -145,24 +131,21 @@ class Attribute { } private: - Variant, std::vector, - std::vector, bool, std::vector, BlockDesc *, - int64_t> + Variant, vector, vector, bool, + vector, BlockDesc *, int64_t> variant_; }; -using AttributeMap = std::unordered_map; +using AttributeMap = std::unordered_map; class AttrReader { public: explicit AttrReader(const AttributeMap &attrs) : attrs_(attrs) {} template - inline T Get(const std::string &name) const { - // PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should - // be in - // AttributeMap", - // name); + inline T Get(const string &name) const { + PADDLE_MOBILE_ENFORCE(attrs_.count(name) != 0, + "%s should be in AttributeMap", name); return ((Attribute)attrs_.at(name)).Get(); } diff --git a/src/framework/data_layout.h b/src/framework/data_layout.h index 72c16c36733c0660ae2cf46de31031370eed444a..9944c88c8fefa9183445b93b3b703a5999d1b682 100644 --- a/src/framework/data_layout.h +++ b/src/framework/data_layout.h @@ -54,7 +54,6 @@ inline std::string DataLayoutToString(const DataLayout &data_layout) { return "ANY_LAYOUT"; default: break; - // std::cout << "unknown DataLayou %d", data_layout; } } diff --git a/src/framework/data_transform.cpp b/src/framework/data_transform.cpp deleted file mode 100644 index a6be4d2fcbbc6e0dd2adb9f71d644b2bd60d4259..0000000000000000000000000000000000000000 --- a/src/framework/data_transform.cpp +++ /dev/null @@ -1,88 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include "framework/data_transform.h" - -namespace paddle_mobile { -namespace framework { - -static void PassTensorData(Tensor *from, Tensor *to) { - to->ShareDataWith(*from); - *from = Tensor(); -} - -void DataTransform(const OpKernelType &expected_kernel_type, - const OpKernelType &kernel_type_for_var, - const Tensor &input_tensor, Tensor *output_tensor) { - bool transformed = false; - Tensor in; - in.ShareDataWith(input_tensor); - Tensor out; - - // // do layout transform - // if (NeedTransformLayout(expected_kernel_type.data_layout_, - // kernel_type_for_var.data_layout_)) { - // TransDataLayout(kernel_type_for_var, expected_kernel_type, in, - // &out); - // transformed = true; - // PassTensorData(&out, &in); - // } - // - // // do data type transform - // if (expected_kernel_type.data_type_ != - // kernel_type_for_var.data_type_) { - // TransDataType(kernel_type_for_var, expected_kernel_type, in, - // &out); - // transformed = true; - // PassTensorData(&out, &in); - // } - // - // // do device transform - // if (!platform::is_same_place(kernel_type_for_var.place_, - // expected_kernel_type.place_)) { - // TransDataDevice(in, expected_kernel_type.place_, &out); - // transformed = true; - // PassTensorData(&out, &in); - // } - // - // PADDLE_ENFORCE(transformed, "No transform is applied, please - // check!"); - // get output data - output_tensor->ShareDataWith(in); -} - -void CopyVariableWithTensor(const Variable &in_var, const Tensor &tensor, - Variable *out_var) { - // if (in_var.IsType()) { - // auto& in_lod_tensor = in_var.Get(); - // auto* tran_lod_tensor = out_var.GetMutable(); - // tran_lod_tensor->set_lod(in_lod_tensor.lod()); - // tran_lod_tensor->set_layout(in_lod_tensor.layout()); - // tran_lod_tensor->ShareDataWith(tensor); - // } else if (in_var.IsType()) { - // auto& in_selected_rows = in_var.Get(); - // auto* trans_selected_rows = - // out_var.GetMutable(); - // trans_selected_rows->set_height(in_selected_rows.height()); - // trans_selected_rows->set_rows(in_selected_rows.rows()); - // trans_selected_rows->mutable_value()->ShareDataWith(tensor); - // } else { - // PADDLE_THROW("unknown var type"); - // } -} - -} // namespace framework -} // namespace paddle_mobile diff --git a/src/framework/data_type.h b/src/framework/data_type.h deleted file mode 100644 index ddfc0dcc4adf8e5897f5f4ea67f9514889863f32..0000000000000000000000000000000000000000 --- a/src/framework/data_type.h +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -namespace paddle_mobile { -namespace framework { - -// inline proto::VarType::Type ToDataType(std::type_index type) { -// using namespace paddle_mobile::framework::proto; -// if (typeid(float).hash_code() == type.hash_code()) { -// return proto::VarType::FP32; -// } else if (typeid(double).hash_code() == type.hash_code()) { -// return proto::VarType::FP64; -// } else if (typeid(int).hash_code() == type.hash_code()) { -// return proto::VarType::INT32; -// } else if (typeid(int64_t).hash_code() == type.hash_code()) { -// return proto::VarType::INT64; -// } else if (typeid(bool).hash_code() == type.hash_code()) { -// return proto::VarType::BOOL; -// } else { -//// PADDLE_THROW("Not supported"); -// } -// } -} // namespace framework -} // namespace paddle_mobile diff --git a/src/framework/ddim.cpp b/src/framework/ddim.cpp index db6f2cd6aba92fec6a42839c0e3198ac749807b0..4fa01564d5f8cc8b2521903ff310b035d39a635e 100644 --- a/src/framework/ddim.cpp +++ b/src/framework/ddim.cpp @@ -183,7 +183,7 @@ DDim DDim::operator*(DDim d) const { int64_t get(const DDim &ddim, int idx) { return ddim[idx]; } -void set(DDim &ddim, int idx, int value) { ddim[idx] = value; } +void set(DDim *ddim, int idx, int value) { (*ddim)[idx] = value; } /// @cond HIDDEN struct VectorizeVisitor : Vistor { diff --git a/src/framework/ddim.h b/src/framework/ddim.h index 88039b2e0a57b4f79247129d1d95e4d5954da6c6..f0b9db9506804478dd68ddde8821cd717b35cbe8 100644 --- a/src/framework/ddim.h +++ b/src/framework/ddim.h @@ -83,17 +83,6 @@ struct DDim { int64_t operator[](int idx) const; - // template - // typename Visitor::result_type apply_visitor(Visitor& visitor) { - // return var.apply_visitor(visitor); - // } - // - // template - // typename Visitor::result_type apply_visitor(Visitor& visitor) - // const { - // return var.apply_visitor(visitor); - // } - DDimVar getVar() { return var; } bool operator==(DDim d) const; @@ -126,7 +115,7 @@ DDim make_ddim(std::initializer_list dims); int64_t get(const DDim &dim, int idx); -void set(DDim &dim, int idx, int val); +void set(DDim *dim, int idx, int val); std::vector vectorize(const DDim &ddim); diff --git a/src/framework/lod_tensor.cpp b/src/framework/lod_tensor.cpp index 48c2c46989e2d477ed0a005f25a8252da0955f13..dc0b77f32e0bea4e901c9661a0ec9e7877ce3d5a 100644 --- a/src/framework/lod_tensor.cpp +++ b/src/framework/lod_tensor.cpp @@ -42,23 +42,10 @@ std::ostream &operator<<(std::ostream &os, const LoD &lod) { } std::ostream &operator<<(std::ostream &os, const LoDTensor &t) { - // PADDLE_ENFORCE(t.type().hash_code() == - // typeid(float).hash_code()); - - // if (!platform::is_cpu_place(t.place())) { - // LoDTensor tt; - // framework::TensorCopy(t, platform::CPUPlace(), &tt); - // platform::DeviceContextPool &pool = - // platform::DeviceContextPool::Instance(); auto &dev_ctx = - // *pool.Get(t.place()); dev_ctx.Wait(); - // - // os << tt; - // return os; - // } - + PADDLE_MOBILE_ENFORCE(t.type().hash_code() == typeid(float).hash_code(), + "t.type() is not float"); os << "dim: " << t.dims() << "\n"; os << "lod: " << t.lod() << "\n"; - // only print first ten elements int64_t size = t.numel() < 10 ? 
t.numel() : 10; for (int64_t i = 0; i < size; ++i) { @@ -76,9 +63,9 @@ std::string LoDToString(const LoD &lod) { LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin, size_t elem_end) { - // PADDLE_ENFORCE_LT(level, in.size()); - // PADDLE_ENFORCE_LT(elem_end, in[level].size()); - + PADDLE_MOBILE_ENFORCE(level < in.size(), "level should >= in.size()"); + PADDLE_MOBILE_ENFORCE(elem_end < in[level].size(), + "elem_end >= in[level].size()"); LoD res; res.resize(in.size() - level); // copy the first level @@ -211,8 +198,9 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx, LoD sub_lod; for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) { - // PADDLE_ENFORCE_LE(start_idx, end_idx); - // PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size()); + PADDLE_MOBILE_ENFORCE(start_idx <= end_idx, "start_idx > end_idx"); + PADDLE_MOBILE_ENFORCE(end_idx < lod[level_idx].size(), + "end_idx >= lod[level_idx].size()"); std::vector level_lens; for (size_t i = start_idx; i < end_idx; ++i) { level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]); @@ -226,10 +214,9 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx, } void AppendLoD(LoD *lod, const LoD &lod_length) { - // PADDLE_ENFORCE( - // lod->empty() || lod->size() == lod_length.size(), - // "The lod_length should has the same size with the appended - // lod."); + PADDLE_MOBILE_ENFORCE( + lod->empty() || lod->size() == lod_length.size(), + "The lod_length should has the same size with the appended lod."); if (lod->empty()) { for (size_t i = 0; i < lod_length.size(); ++i) { lod->emplace_back(1, 0); // size = 1, value = 0; diff --git a/src/framework/op_info.h b/src/framework/op_info.h index 7475d155232e31cf00dab6273200f5bc4671f2e9..16b3487955ce05721e6e3f3e79b6d8ebd180e020 100644 --- a/src/framework/op_info.h +++ b/src/framework/op_info.h @@ -25,9 +25,8 @@ template struct OpInfo { OpCreator creator_; const OpCreator &Creator() const { - // PADDLE_ENFORCE_NOT_NULL(creator_, - // "Operator Creator has not been - // registered"); + PADDLE_MOBILE_ENFORCE(creator_ != nullptr, + "Operator Creator has not been registered"); return creator_; } }; @@ -48,17 +47,15 @@ class OpInfoMap { } void Insert(const std::string &type, const OpInfo &info) { - // PADDLE_ENFORCE(!Has(type), "Operator %s has been - // registered", type); + PADDLE_MOBILE_ENFORCE(!Has(type), "Operator %s has been registered", + type.c_str()); map_.insert({type, info}); } const OpInfo &Get(const std::string &type) const { auto op_info_ptr = GetNullable(type); - // PADDLE_ENFORCE_NOT_NULL(op_info_ptr, "Operator %s has not - // been - // registered", - // type); + PADDLE_MOBILE_ENFORCE(op_info_ptr != nullptr, + "Operator %s has not been registered", type.c_str()); return *op_info_ptr; } diff --git a/src/framework/program/program-optimize/node.cpp b/src/framework/program/program-optimize/node.cpp index eba5e8b6504e04ec3f9d0d235cc04efd4937baae..f77fc47f95bf2de487f74d05e332b9de9648175f 100644 --- a/src/framework/program/program-optimize/node.cpp +++ b/src/framework/program/program-optimize/node.cpp @@ -12,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include - #include "framework/operator.h" #include "framework/program/program-optimize/node.h" diff --git a/src/framework/scope.cpp b/src/framework/scope.cpp index c5ee2d39fa7a7bf4c1c7b1c2f3fb8f1e92f4e455..664499d7e635c75ecc277bfc708dda908a25b170 100644 --- a/src/framework/scope.cpp +++ b/src/framework/scope.cpp @@ -76,7 +76,6 @@ void Scope::DeleteScope(Scope *scope) const { auto it = std::find(kids_.begin(), kids_.end(), scope); kids_.erase(it); delete scope; - // deferent } void Scope::EraseVars(const std::vector &var_names) { @@ -104,14 +103,6 @@ void Scope::Rename(const std::string &origin_name, vars_[new_name] = origin_it->second; vars_.erase(origin_it); } -// -// std::string Scope::Rename(const std::string& origin_name) -// const { -// auto var_name = string::Sprintf("%p.%d", this, -// vars_.size()); -// Rename(origin_name, var_name); -// return var_name; -// } Variable *Scope::FindVarLocally(const std::string &name) const { auto it = vars_.find(name); diff --git a/src/io.h b/src/io.h index 13a6761b81c2679afc02d220c11056584f84f5dd..48428c5ceb4abee67fba6f6fe861067d10988be4 100644 --- a/src/io.h +++ b/src/io.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once -#include #include #include #include +#include #include "common/types.h" #include "framework/lod_tensor.h" @@ -27,7 +27,7 @@ limitations under the License. */ namespace paddle_mobile { -template +template class Loader { public: const framework::Program Load(const std::string &dirname, @@ -39,7 +39,7 @@ class Loader { const std::string &file_path); }; -template +template class Executor { public: typedef typename PrecisionTrait
<P>
::ptype Ptype; diff --git a/src/operators/batchnorm_op.cpp b/src/operators/batchnorm_op.cpp index 1f8a1698f4281174d2503650bde5deb0ef9825e9..815eac8806c82c2a167fc7c462d2e76f1bb233fd 100644 --- a/src/operators/batchnorm_op.cpp +++ b/src/operators/batchnorm_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef BATCHNORM_OP + #include "batchnorm_op.h" namespace paddle_mobile { @@ -29,3 +31,5 @@ template class BatchNormOp; namespace ops = paddle_mobile::operators; USE_OP(batch_norm); REGISTER_OPERATOR(batch_norm, ops::BatchNormOp); + +#endif diff --git a/src/operators/batchnorm_op.h b/src/operators/batchnorm_op.h index 760466eeddcb472ed2a47625b786a021ce7c1ef5..671b2f09f6cbc47570493a8bf1d2c4e23dde9f8b 100644 --- a/src/operators/batchnorm_op.h +++ b/src/operators/batchnorm_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef BATCHNORM_OP + #pragma once #include @@ -47,3 +49,5 @@ class BatchNormOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/box_coder_op.cpp b/src/operators/box_coder_op.cpp index ca653b5711241e77a9df308922aedb0551b1103f..22d006a258ca0cd18b63dc72aed6a02405ff6e81 100644 --- a/src/operators/box_coder_op.cpp +++ b/src/operators/box_coder_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef BOXCODER_OP + #include "operators/box_coder_op.h" #include namespace paddle_mobile { @@ -52,3 +54,5 @@ template class BoxCoderOp; namespace ops = paddle_mobile::operators; USE_OP(box_coder); REGISTER_OPERATOR(box_coder, ops::BoxCoderOp); + +#endif diff --git a/src/operators/box_coder_op.h b/src/operators/box_coder_op.h index a2203e1d89f8b5b6270c1576711a4c008d927e34..2d3cd0d8eaa21df3384a22e0659b10c3eac394a3 100644 --- a/src/operators/box_coder_op.h +++ b/src/operators/box_coder_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef BOXCODER_OP + #pragma once #include @@ -50,3 +52,5 @@ class BoxCoderOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/concat_op.cpp b/src/operators/concat_op.cpp index 6744b47b7728558f95fad0435979841a73a7a6f6..26f5e7d4e48ee2c3402a821b49757b1b0914828a 100644 --- a/src/operators/concat_op.cpp +++ b/src/operators/concat_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef CONCAT_OP + #include "concat_op.h" namespace paddle_mobile { @@ -62,3 +64,5 @@ template class ConcatOp; namespace ops = paddle_mobile::operators; USE_OP(concat); REGISTER_OPERATOR(concat, ops::ConcatOp); + +#endif diff --git a/src/operators/concat_op.h b/src/operators/concat_op.h index 15160e20a403d73bb11e982f5a527454f26b5dd6..ad2db52a0b65a8474a2534d592b7bbd53924f8cf 100644 --- a/src/operators/concat_op.h +++ b/src/operators/concat_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef CONCAT_OP + #pragma once #include @@ -45,3 +47,5 @@ class ConcatOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/conv_op.cpp b/src/operators/conv_op.cpp index bfddcf14acbba016c4e4333e05fcc7dd6eebc509..c8ec33333f596a6c10491cfdb826f1dc54d69c6f 100644 --- a/src/operators/conv_op.cpp +++ b/src/operators/conv_op.cpp @@ -12,9 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef CONV_OP + #include "operators/conv_op.h" #include -#include "framework/data_type.h" #include "framework/op_proto_maker.h" #include "framework/op_registry.h" @@ -23,30 +24,11 @@ namespace operators { template void ConvOp::InferShape() const { - // std::cout << " begin get dims: " << std::endl; - auto in_dims = param_.Input()->dims(); - - // std::cout << " end get in dims: " << std::endl; - - // std::cout << " in_dims: " << in_dims << std::endl; - - // std::cout << " begin get Filter " << std::endl; - auto filter_dims = param_.Filter()->dims(); - - // std::cout << " end get Filter " << std::endl; - - // std::cout << " begin get Attrs " << std::endl; - const std::vector &strides = param_.Strides(); - - // std::cout << " end get Attrs " << strides[0] << std::endl; - std::vector paddings = param_.Paddings(); - int groups = param_.Groups(); - std::vector dilations = param_.Dilations(); PADDLE_MOBILE_ENFORCE((in_dims.size() == filter_dims.size() && @@ -73,3 +55,5 @@ template class ConvOp; namespace ops = paddle_mobile::operators; USE_OP(conv2d); REGISTER_OPERATOR(conv2d, ops::ConvOp); + +#endif diff --git a/src/operators/conv_op.h b/src/operators/conv_op.h index f15f286b606db1403b0e0e609bfc38caac2c5105..0a26ce6c3f1ee005e982f10dcc3b38853124bdfb 100644 --- a/src/operators/conv_op.h +++ b/src/operators/conv_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef CONV_OP + #pragma once #include @@ -53,3 +55,5 @@ inline int ConvOutputSize(int input_size, int filter_size, int dilation, } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/depthwise_conv_op.cpp b/src/operators/depthwise_conv_op.cpp index 2538298175c5ea40d7e44338caee853a73c089c4..87c9746b4dfa1e74fcf3733656b9b3b27a8740fb 100644 --- a/src/operators/depthwise_conv_op.cpp +++ b/src/operators/depthwise_conv_op.cpp @@ -12,9 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef DEPTHWISECONV_OP + #include "operators/depthwise_conv_op.h" #include -#include "framework/data_type.h" #include "framework/op_proto_maker.h" #include "framework/op_registry.h" #include "operators/conv_op.h" @@ -55,3 +56,5 @@ template class DepthwiseConvOp; namespace ops = paddle_mobile::operators; USE_OP(depthwise_conv2d); REGISTER_OPERATOR(depthwise_conv2d, ops::DepthwiseConvOp); + +#endif diff --git a/src/operators/depthwise_conv_op.h b/src/operators/depthwise_conv_op.h index c47fa0ffcacd54a5ddf7280419ca1170173bde1b..37ba1b9ada32d75cb715dd86221758c71c6b1929 100644 --- a/src/operators/depthwise_conv_op.h +++ b/src/operators/depthwise_conv_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef DEPTHWISECONV_OP + #pragma once #include @@ -47,3 +49,5 @@ class DepthwiseConvOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/elementwise_add_op.cpp b/src/operators/elementwise_add_op.cpp index 1eff80152bfb193fc8cd3866d63b1ae4d55f4b9c..ff2cd2598814cf9a270090213c0524c165c66ced 100644 --- a/src/operators/elementwise_add_op.cpp +++ b/src/operators/elementwise_add_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef ELEMENTWISEADD_OP + #include "elementwise_add_op.h" namespace paddle_mobile { @@ -29,3 +31,5 @@ template class ElementwiseAddOp; namespace ops = paddle_mobile::operators; USE_OP(elementwise_add); REGISTER_OPERATOR(elementwise_add, ops::ElementwiseAddOp); + +#endif diff --git a/src/operators/elementwise_add_op.h b/src/operators/elementwise_add_op.h index 7dd7e147a0630450c3ad9f830d661b2b92a5f995..727d569fb3eada6b406c02f127c04022eab4ac2d 100644 --- a/src/operators/elementwise_add_op.h +++ b/src/operators/elementwise_add_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef ELEMENTWISEADD_OP + #pragma once #include @@ -46,3 +48,5 @@ class ElementwiseAddOp : public framework::OperatorWithKernel { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/fusion_conv_add.cpp b/src/operators/fusion_conv_add.cpp index 433e3ee741d37fefead87fc6d08723fde8142387..fe380bddca585e434418513d5152c1df0426e80d 100644 --- a/src/operators/fusion_conv_add.cpp +++ b/src/operators/fusion_conv_add.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef FUSIONCONVADD_OP + #include "operators/fusion_conv_add.h" namespace paddle_mobile { namespace operators { @@ -25,3 +27,5 @@ template class FushionConvAddOp; namespace ops = paddle_mobile::operators; USE_OP(conv_add); REGISTER_OPERATOR(conv_add, ops::FushionConvAddOp); + +#endif diff --git a/src/operators/fusion_conv_add.h b/src/operators/fusion_conv_add.h index c6a1d9fdff246084542d50230a7649e938143c4a..911df63dd4e2a5c00ac364d85dd4916afb72e627 100644 --- a/src/operators/fusion_conv_add.h +++ b/src/operators/fusion_conv_add.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ +#ifdef FUSIONCONVADD_OP + #pragma once #include @@ -66,3 +68,5 @@ class FushionConvAddOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/fusion_conv_add_relu_op.cpp b/src/operators/fusion_conv_add_relu_op.cpp index 92f6fcf848f169eed141b1456c05e6fbd8ca9895..bf33db7d78e995c087478f947ece7038953fa42f 100644 --- a/src/operators/fusion_conv_add_relu_op.cpp +++ b/src/operators/fusion_conv_add_relu_op.cpp @@ -12,4 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef CONVADDRELU_OP + #include "fusion_conv_add_relu_op.h" + +#endif diff --git a/src/operators/fusion_conv_add_relu_op.h b/src/operators/fusion_conv_add_relu_op.h index 43279e1f995f4b18ca976e51d1a4f81847c975b9..4825a01be95f31d11418fe114700aaaa248e0d7e 100644 --- a/src/operators/fusion_conv_add_relu_op.h +++ b/src/operators/fusion_conv_add_relu_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef CONVADDRELU_OP + #pragma once #include "framework/operator.h" @@ -49,3 +51,5 @@ class ConvAddReluOp { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/fusion_fc_op.cpp b/src/operators/fusion_fc_op.cpp index 0f1be5c29fee1f741b773bbfa11b50b5aa49b8b7..8f639e212a1a922fb1a943d2582dd692e1bfabee 100644 --- a/src/operators/fusion_fc_op.cpp +++ b/src/operators/fusion_fc_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef FUSION_FC_OP + #include "operators/fusion_fc_op.h" namespace paddle_mobile { namespace operators { @@ -54,3 +56,5 @@ template class FushionFcOp; namespace ops = paddle_mobile::operators; USE_OP(fc); REGISTER_OPERATOR(fc, ops::FushionFcOp); + +#endif diff --git a/src/operators/fusion_fc_op.h b/src/operators/fusion_fc_op.h index a0eeebca5f5f028bec75703a4a4befeb18e374fe..c5419de9ff36898283c3743908b017e01c4c913c 100644 --- a/src/operators/fusion_fc_op.h +++ b/src/operators/fusion_fc_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef FUSION_FC_OP + #pragma once #include @@ -71,3 +73,5 @@ class FushionFcOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/batchnorm_kernel.cpp b/src/operators/kernel/arm/batchnorm_kernel.cpp index e28bdd7147f300cb181ffc5e0aeebec412ec45e7..e441e6cf3816ee5a5d21b5fcd1d1dc02d59ae39d 100644 --- a/src/operators/kernel/arm/batchnorm_kernel.cpp +++ b/src/operators/kernel/arm/batchnorm_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef BATCHNORM_OP + #pragma once #include "operators/kernel/batchnorm_kernel.h" @@ -91,3 +93,5 @@ void BatchNormKernel::Compute(const BatchNormParam ¶m) const { } } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/box_coder_kernel.cpp b/src/operators/kernel/arm/box_coder_kernel.cpp index d604c3d2a8d7f7fb1c817397a61cb156f1d0f392..9654228911af77e751e4ef9d1b92fb92ae30591d 100644 --- a/src/operators/kernel/arm/box_coder_kernel.cpp +++ b/src/operators/kernel/arm/box_coder_kernel.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#pragma once +#ifdef BOXCODER_OP #include "operators/kernel/box_coder_kernel.h" @@ -135,3 +135,5 @@ void BoxCoderKernel::Compute(const BoxCoderParam& param) const { } } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/concat_kernel.cpp b/src/operators/kernel/arm/concat_kernel.cpp index 705b698dbe9e9768713417f85ae2879df66acf9e..329677fb11e6ee2db74b5191586ac6157ede9697 100644 --- a/src/operators/kernel/arm/concat_kernel.cpp +++ b/src/operators/kernel/arm/concat_kernel.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#pragma once +#ifdef CONCAT_OP #include "operators/kernel/concat_kernel.h" @@ -85,3 +85,5 @@ void ConcatKernel::Compute(const ConcatParam ¶m) const { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/conv_kernel.cpp b/src/operators/kernel/arm/conv_kernel.cpp index f04b8156c9d3c88520b1c74b60a20f41e7fedc98..546ae33407d4c5affd6459d4167ba5b373887f12 100644 --- a/src/operators/kernel/arm/conv_kernel.cpp +++ b/src/operators/kernel/arm/conv_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef CONV_OP + #include "operators/kernel/conv_kernel.h" namespace paddle_mobile { @@ -112,3 +114,5 @@ template class ConvKernel; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/depthwise_conv_kernel.cpp b/src/operators/kernel/arm/depthwise_conv_kernel.cpp index 1da52fa8d469bd81d043843d7bcca3a7b01f6663..6cd4538c4540ff11d91a6f49d088ad38f6d992e7 100644 --- a/src/operators/kernel/arm/depthwise_conv_kernel.cpp +++ b/src/operators/kernel/arm/depthwise_conv_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef DEPTHWISECONV_OP + #include "operators/kernel/depthwise_conv_kernel.h" #include "operators/kernel/conv_kernel.h" @@ -124,3 +126,5 @@ template class DepthwiseConvKernel; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/elementwise_add_kernel.cpp b/src/operators/kernel/arm/elementwise_add_kernel.cpp index f8d40ad17ff09d77c26a9f32a87190f1cdd6038a..02aabfe3ce0622df80c86906f45ab5cc688c7b12 100644 --- a/src/operators/kernel/arm/elementwise_add_kernel.cpp +++ b/src/operators/kernel/arm/elementwise_add_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ +#ifdef ELEMENTWISEADD_OP + #pragma once #include "operators/kernel/elementwise_add_kernel.h" @@ -40,3 +42,5 @@ template class ElementwiseAddKernel; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/fushion_fc_kernel.cpp b/src/operators/kernel/arm/fushion_fc_kernel.cpp index ebec90aa27154334488329d079b76d14630e3294..ea88252c21ab2f13f0564602ac9b922be521578b 100644 --- a/src/operators/kernel/arm/fushion_fc_kernel.cpp +++ b/src/operators/kernel/arm/fushion_fc_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef FUSION_FC_OP + #pragma once #include "operators/kernel/fushion_fc_kernel.h" @@ -65,3 +67,5 @@ void FushionFcKernel::Compute(const FushionFcParam ¶m) const { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/lrn_kernel.cpp b/src/operators/kernel/arm/lrn_kernel.cpp index 47e64d487d72eb191e6b0ec8751c877363dd7b48..3e12b62508204b38150d7fcc82cef99f7617ba09 100644 --- a/src/operators/kernel/arm/lrn_kernel.cpp +++ b/src/operators/kernel/arm/lrn_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef LRN_OP + #pragma once #include "operators/kernel/lrn_kernel.h" @@ -42,3 +44,5 @@ template class LrnKernel; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/mul_kernel.cpp b/src/operators/kernel/arm/mul_kernel.cpp index f1eea3950cebe8d4c27b3481bf527e75f26c99aa..70bcac2461cdef535de8c9759ec10113e45b7ae2 100644 --- a/src/operators/kernel/arm/mul_kernel.cpp +++ b/src/operators/kernel/arm/mul_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef MUL_OP + #pragma once #include "operators/kernel/mul_kernel.h" @@ -48,3 +50,5 @@ template class MulKernel; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/multiclass_nms_kernel.cpp b/src/operators/kernel/arm/multiclass_nms_kernel.cpp index 61470ee31936f092e2f534c5534c1c78aaf5d44c..39f55dab38031db14b617e48eedb236eacd1b714 100644 --- a/src/operators/kernel/arm/multiclass_nms_kernel.cpp +++ b/src/operators/kernel/arm/multiclass_nms_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef MULTICLASSNMS_OP + #pragma once #include "operators/kernel/multiclass_nms_kernel.h" @@ -273,3 +275,5 @@ void MultiClassNMSKernel::Compute( } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/pool_kernel.cpp b/src/operators/kernel/arm/pool_kernel.cpp index 6aa1b76058fdf8a9828321a23f26b1c17134d7c9..2809a802a6cf94c931e409aecfa0090139624a46 100644 --- a/src/operators/kernel/arm/pool_kernel.cpp +++ b/src/operators/kernel/arm/pool_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ +#ifdef POOL_OP + #include #include "common/log.h" @@ -73,3 +75,5 @@ void PoolKernel::Compute(const PoolParam ¶m) const { } } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/prior_box_kernel.cpp b/src/operators/kernel/arm/prior_box_kernel.cpp index fc61f43f3fe363c1f6d67f81ef37fb2d950f9717..e029c555d4d40745976be45b7a9c022eb62705c7 100644 --- a/src/operators/kernel/arm/prior_box_kernel.cpp +++ b/src/operators/kernel/arm/prior_box_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef PRIORBOX_OP + #pragma once #include "operators/kernel/prior_box_kernel.h" @@ -143,3 +145,5 @@ void PriorBoxKernel::Compute(const PriorBoxParam ¶m) const { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/relu_kernel.cpp b/src/operators/kernel/arm/relu_kernel.cpp index 586d981175184e2da03f2949390932b888d67f4a..854fa1d185ddb002aa37a10ade0683d841af8793 100644 --- a/src/operators/kernel/arm/relu_kernel.cpp +++ b/src/operators/kernel/arm/relu_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef RELU_OP + #pragma once #include "operators/kernel/relu_kernel.h" @@ -45,3 +47,5 @@ void ReluKernel::Compute(const ReluParam ¶m) const { } } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/reshape_kernel.cpp b/src/operators/kernel/arm/reshape_kernel.cpp index 7f7e80ece9f30631c109d0d27f4025e2617cec95..3d40309e97145e1df70f2a4191ee571c4a05627a 100644 --- a/src/operators/kernel/arm/reshape_kernel.cpp +++ b/src/operators/kernel/arm/reshape_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef RESHAPE_OP + #pragma once #include "operators/kernel/reshape_kernel.h" @@ -49,3 +51,5 @@ void ReshapeKernel::Compute(const ReshapeParam ¶m) const { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/sigmoid_kernel.cpp b/src/operators/kernel/arm/sigmoid_kernel.cpp index 74bc29878019dfe52de94f6fef966a416e04cc72..c03a8644cc086e14a24abd32bf2bdb347187ce0e 100644 --- a/src/operators/kernel/arm/sigmoid_kernel.cpp +++ b/src/operators/kernel/arm/sigmoid_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef SIGMOID_OP + #include "../sigmoid_kernel.h" #if __ARM_NEON #include "../../math/math_func_neon.h" @@ -25,35 +27,21 @@ using framework::Tensor; void sigmoid(const Tensor *X, Tensor *Y) { #if __ARM_NEON - DLOG << "step1"; const float *input = X->data(); - DLOG << "step11"; - float *output = Y->mutable_data(); - DLOG << "step2"; - const DDim &dDim = X->dims(); - DLOG << "step3"; - int axis_index = 1; if (dDim.size() < 4) { axis_index = 0; } - DLOG << "step4"; - DDim outer_ddim = paddle_mobile::framework::slice_ddim(dDim, 0, axis_index + 1); DDim inner_ddim = paddle_mobile::framework::slice_ddim(dDim, axis_index + 1, dDim.size()); - DLOG << "step5"; - int out_size = paddle_mobile::framework::product(outer_ddim); int inner_size = paddle_mobile::framework::product(inner_ddim); - DLOG << "step6"; #pragma omp parallel for - DLOG << "outsize=" << out_size; - DLOG << "innersize=" << inner_size; for (int i = 0; i < out_size; ++i) { const float *input_outer_ptr = input + i * inner_size; float *output_outer_ptr = output + i * inner_size; @@ -93,3 +81,5 @@ void SigmoidKernel::Compute(const SigmoidParam ¶m) const { template class SigmoidKernel; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/softmax_kernel.cpp b/src/operators/kernel/arm/softmax_kernel.cpp index 0a50fc0a0136b66df4f55c10decc84a541b52dce..542283242d09abfbad8830eb0b36136ed35a6ef6 100644 --- a/src/operators/kernel/arm/softmax_kernel.cpp +++ b/src/operators/kernel/arm/softmax_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef SOFTMAX_OP + #include "../softmax_kernel.h" #include "../../math/softmax.h" namespace paddle_mobile { @@ -29,3 +31,5 @@ void SoftmaxKernel::Compute(const SoftmaxParam ¶m) const { template class SoftmaxKernel; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/arm/transpose_kernel.cpp b/src/operators/kernel/arm/transpose_kernel.cpp index 92b5916ec40d53bb55c1cc4aaf0ce6ec9a9bfaeb..3ebe261fb8fe511022d6efbf4641898ef326319f 100644 --- a/src/operators/kernel/arm/transpose_kernel.cpp +++ b/src/operators/kernel/arm/transpose_kernel.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#pragma once +#ifdef TRANSPOSE_OP #include "operators/kernel/transpose_kernel.h" @@ -70,3 +70,5 @@ void TransposeKernel::Compute(const TransposeParam& param) const { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/batchnorm_kernel.h b/src/operators/kernel/batchnorm_kernel.h index ebace43e1c559df1bf997d05f68db862d1ed3cb4..6c795b2d5e9e7e81fb25d4a1a6dd3ca13c04bd9b 100644 --- a/src/operators/kernel/batchnorm_kernel.h +++ b/src/operators/kernel/batchnorm_kernel.h @@ -12,9 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef BATCHNORM_OP + +#pragma once + #include "framework/operator.h" #include "operators/op_param.h" -#pragma once; namespace paddle_mobile { namespace operators { @@ -30,3 +33,5 @@ class BatchNormKernel } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/box_coder_kernel.h b/src/operators/kernel/box_coder_kernel.h index 2d350202d091563f668f9209a1540bb0a32b6ac3..1c612b373cd086fcd566fe69e71eb77e4d1a30b6 100644 --- a/src/operators/kernel/box_coder_kernel.h +++ b/src/operators/kernel/box_coder_kernel.h @@ -12,14 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef BOXCODER_OP + +#pragma once + #include #include "framework/operator.h" #include "operators/math/transform.h" #include "operators/op_param.h" -#pragma once; - namespace paddle_mobile { namespace operators { @@ -31,3 +33,5 @@ class BoxCoderKernel }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/concat_kernel.h b/src/operators/kernel/concat_kernel.h index d91fb84f015851074e317980f1fe9ff930e9e399..3b649974e8bb670b7ec81c61f185a2d8f9b24ad0 100644 --- a/src/operators/kernel/concat_kernel.h +++ b/src/operators/kernel/concat_kernel.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef CONCAT_OP + #pragma once #include "framework/operator.h" #include "operators/op_param.h" @@ -29,3 +31,5 @@ class ConcatKernel : public framework::OpKernelBase { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/conv_kernel.h b/src/operators/kernel/conv_kernel.h index d43a174ffdbf0ca6dbb39e463b8e97652c7b0daf..06c0c2c55629d9762cffa0b2c5572050b95bc771 100644 --- a/src/operators/kernel/conv_kernel.h +++ b/src/operators/kernel/conv_kernel.h @@ -12,6 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef CONV_OP + +#pragma once + #include #include "framework/operator.h" #include "operators/math/im2col.h" @@ -19,8 +23,6 @@ limitations under the License. */ #include "operators/math/vol2col.h" #include "operators/op_param.h" -#pragma once; - namespace paddle_mobile { namespace operators { @@ -49,3 +51,5 @@ inline bool IsExpand(const std::vector &filter_dim, } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/depthwise_conv_kernel.h b/src/operators/kernel/depthwise_conv_kernel.h index 43ddfb25cd859a7e937577221215d8352b846bff..1ef76a573e27ff09fe7842ad78e9fe6042a742a1 100644 --- a/src/operators/kernel/depthwise_conv_kernel.h +++ b/src/operators/kernel/depthwise_conv_kernel.h @@ -12,14 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef DEPTHWISECONV_OP + +#pragma once + #include "framework/operator.h" #include "operators/math/im2col.h" #include "operators/math/math_function.h" #include "operators/math/vol2col.h" #include "operators/op_param.h" -#pragma once; - namespace paddle_mobile { namespace operators { @@ -32,3 +34,5 @@ class DepthwiseConvKernel : public OpKernelBase { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/elementwise_add_kernel.h b/src/operators/kernel/elementwise_add_kernel.h index 28b3bc29e593561d18512cbf1af947dd64cd9d87..7a2f92120105b9f9539937e00c392c0eb77e3830 100644 --- a/src/operators/kernel/elementwise_add_kernel.h +++ b/src/operators/kernel/elementwise_add_kernel.h @@ -12,7 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#pragma once; +#ifdef ELEMENTWISEADD_OP + +#pragma once #include "framework/operator.h" #include "operators/math/elementwise_op_function.h" @@ -31,3 +33,5 @@ class ElementwiseAddKernel }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/fpga/conv_kernel.cpp b/src/operators/kernel/fpga/conv_kernel.cpp index a50a5c59bdaaa3829602049bf88bf41fa02af53c..21badb0d8eaf125a6e46bf3283adca90a175b984 100644 --- a/src/operators/kernel/fpga/conv_kernel.cpp +++ b/src/operators/kernel/fpga/conv_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef CONV_OP + namespace paddle_mobile { namespace operators { @@ -22,3 +24,5 @@ namespace operators { // template class ConvKernel; } } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/fushion_fc_kernel.h b/src/operators/kernel/fushion_fc_kernel.h index 7597a7120d1840128810730ad3fab11fd01b10fa..612a91a8ab747d806e890eeaba91e7a93f8e25ab 100644 --- a/src/operators/kernel/fushion_fc_kernel.h +++ b/src/operators/kernel/fushion_fc_kernel.h @@ -12,11 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef FUSION_FC_OP + +#pragma once + #include "framework/operator.h" -#include "operators/math/math_function.h" #include "operators/op_param.h" - -#pragma once; +#include "operators/math/math_function.h" namespace paddle_mobile { namespace operators { @@ -29,3 +31,5 @@ class FushionFcKernel }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/lrn_kernel.h b/src/operators/kernel/lrn_kernel.h index f5fd8313482a92aad0c01d3e0acc9dcfcc83f2d8..ca04a45572bd922baa936bc151f7730c16131f40 100644 --- a/src/operators/kernel/lrn_kernel.h +++ b/src/operators/kernel/lrn_kernel.h @@ -12,9 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef LRN_OP + +#pragma once + #include "framework/operator.h" #include "operators/op_param.h" -#pragma once; namespace paddle_mobile { namespace operators { @@ -70,3 +73,5 @@ class LrnKernel : public framework::OpKernelBase { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/mali/conv_kernel.cpp b/src/operators/kernel/mali/conv_kernel.cpp index 75672549583ebc15867e5f279d5ce3a7137e5b70..695f937880328e8c2ffed91a8beee23e9a72899a 100644 --- a/src/operators/kernel/mali/conv_kernel.cpp +++ b/src/operators/kernel/mali/conv_kernel.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef CONV_OP + #include "operators/kernel/conv_kernel.h" namespace paddle_mobile { @@ -23,3 +25,5 @@ void ConvKernel::Compute(const ConvParam ¶m) const {} template class ConvKernel; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/mul_kernel.h b/src/operators/kernel/mul_kernel.h index 809c9b80b5ba0d610827d8fa5ff00d5ad7183ab9..1a7102f21d7e721b8243bb8e67f8b6bfc826707b 100644 --- a/src/operators/kernel/mul_kernel.h +++ b/src/operators/kernel/mul_kernel.h @@ -12,10 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef MUL_OP + +#pragma once + #include "framework/operator.h" -#include "operators/math/math_function.h" #include "operators/op_param.h" -#pragma once; +#include "operators/math/math_function.h" namespace paddle_mobile { namespace operators { @@ -29,3 +32,5 @@ class MulKernel : public framework::OpKernelBase { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/multiclass_nms_kernel.h b/src/operators/kernel/multiclass_nms_kernel.h index 4453197e5c866398bc6f8807ec921ff5638fbb71..82bafe2685423f8014d95b8fc875554567d2094a 100644 --- a/src/operators/kernel/multiclass_nms_kernel.h +++ b/src/operators/kernel/multiclass_nms_kernel.h @@ -12,10 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef MULTICLASSNMS_OP + +#pragma once + #include "framework/operator.h" -#include "operators/op_param.h" -#pragma once; +#include "operators/op_param.h" namespace paddle_mobile { namespace operators { @@ -28,3 +31,5 @@ class MultiClassNMSKernel }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/pool_kernel.h b/src/operators/kernel/pool_kernel.h index 5cb185dea6eaed0bbb50c5fd5d3450d4e92f18e7..5d53e8605dee3ef0c7864ffd480c62f03d71ead8 100644 --- a/src/operators/kernel/pool_kernel.h +++ b/src/operators/kernel/pool_kernel.h @@ -12,11 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef POOL_OP + #pragma once #include "framework/operator.h" -#include "operators/math/pooling.h" #include "operators/op_param.h" +#include "operators/math/pooling.h" namespace paddle_mobile { namespace operators { @@ -29,3 +31,5 @@ class PoolKernel : public OpKernelBase { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/prior_box_kernel.h b/src/operators/kernel/prior_box_kernel.h index c3cd399bfe9fad86b45c33d947dbbb3e4f99bade..2f35eb412077019efe8fa3ff140a35e46d0f4a6d 100644 --- a/src/operators/kernel/prior_box_kernel.h +++ b/src/operators/kernel/prior_box_kernel.h @@ -12,13 +12,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef PRIORBOX_OP + +#pragma once + #include +#include "operators/op_param.h" #include "framework/operator.h" #include "operators/math/transform.h" -#include "operators/op_param.h" - -#pragma once; namespace paddle_mobile { namespace operators { @@ -55,3 +57,5 @@ class PriorBoxKernel }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/relu_kernel.h b/src/operators/kernel/relu_kernel.h index 83b4548f3e5421657ae6f79bd226e16e1aba7ffb..793268f35a78255f853c85d1af0d2ef0d3d328e5 100644 --- a/src/operators/kernel/relu_kernel.h +++ b/src/operators/kernel/relu_kernel.h @@ -12,10 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef RELU_OP + +#pragma once + #include "framework/operator.h" -#include "operators/op_param.h" -#pragma once; +#include "operators/op_param.h" namespace paddle_mobile { namespace operators { @@ -27,3 +30,5 @@ class ReluKernel : public framework::OpKernelBase { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/reshape_kernel.h b/src/operators/kernel/reshape_kernel.h index 7d5dcdf71de232b1c72180231731fcf76483b9e4..6b153e5fe3eba73f548fd1fc0ab9f95a5b390bf1 100644 --- a/src/operators/kernel/reshape_kernel.h +++ b/src/operators/kernel/reshape_kernel.h @@ -12,12 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#ifdef RESHAPE_OP + +#pragma once +#include #include "framework/operator.h" -#include "operators/op_param.h" -#pragma once; +#include "operators/op_param.h" namespace paddle_mobile { namespace operators { @@ -72,3 +74,5 @@ class ReshapeKernel : public framework::OpKernelBase { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/sigmoid_kernel.h b/src/operators/kernel/sigmoid_kernel.h index 8f5c787f3ff009ed1e334e61657d00454d6e4c0b..e901f02096c764537f268f628ccdc379f3a503e1 100644 --- a/src/operators/kernel/sigmoid_kernel.h +++ b/src/operators/kernel/sigmoid_kernel.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef SIGMOID_OP + #pragma once #include "framework/operator.h" @@ -27,3 +29,5 @@ class SigmoidKernel : public OpKernelBase { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/softmax_kernel.h b/src/operators/kernel/softmax_kernel.h index 5bdae46d288adef3c07c6b2735bdfe5e6ec0c1c3..2b2d753cf666a6eb58f70f2f43afbbefb3953d8b 100644 --- a/src/operators/kernel/softmax_kernel.h +++ b/src/operators/kernel/softmax_kernel.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef SOFTMAX_OP + #pragma once #include "framework/operator.h" @@ -30,3 +32,5 @@ class SoftmaxKernel : public OpKernelBase { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/kernel/transpose_kernel.h b/src/operators/kernel/transpose_kernel.h index aa7d8902097df441eaa28ea8a74b5e9234f7daea..82d73ac82cd28edbd5b6fc349748293fd00fcf45 100644 --- a/src/operators/kernel/transpose_kernel.h +++ b/src/operators/kernel/transpose_kernel.h @@ -12,13 +12,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef TRANSPOSE_OP + +#pragma once + #include #include "framework/operator.h" #include "operators/op_param.h" -#pragma once; - namespace paddle_mobile { namespace operators { @@ -30,3 +32,5 @@ class TransposeKernel }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/lrn_op.cpp b/src/operators/lrn_op.cpp index cc89a034b4c43bcee7778cad0c16c614e74bb5fb..f072b22b063c6eb28cb5c0a183b51e6071c82bd3 100644 --- a/src/operators/lrn_op.cpp +++ b/src/operators/lrn_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef LRN_OP + #include "lrn_op.h" namespace paddle_mobile { @@ -29,3 +31,5 @@ template class LrnOp; namespace ops = paddle_mobile::operators; USE_OP(lrn); REGISTER_OPERATOR(lrn, ops::LrnOp); + +#endif diff --git a/src/operators/lrn_op.h b/src/operators/lrn_op.h index e5d98e1bb103307e1fae9c2460be19fe9d0f01a0..931c6b4ab069abf9eed496e433a114895f0ace54 100644 --- a/src/operators/lrn_op.h +++ b/src/operators/lrn_op.h @@ -11,6 +11,9 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +#ifdef LRN_OP + #pragma once #include @@ -45,3 +48,5 @@ class LrnOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/math/gemm.cpp b/src/operators/math/gemm.cpp index 0c0ae8e3dd84f38218d03a761c58a664b927f161..fc243766bf9f8760178ac4efb0dfdd11a5742fa9 100644 --- a/src/operators/math/gemm.cpp +++ b/src/operators/math/gemm.cpp @@ -13,10 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "operators/math/gemm.h" +#ifndef X86 +#include +#endif namespace paddle_mobile { namespace operators { namespace math { +float ab[MR * NR]; // 将A矩阵分块复制到连续内存(ColMajor) void PackMatrixA(int m, int k, int paddingM, const float *A, int lda, float *buffer) { @@ -170,17 +174,197 @@ void InnerKernel(int m, int n, int k, float alpha, const float *A, int lda, } // 计算一个更小的 4 * 4 的 C 矩阵分块 +#if defined(IOS) +void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b, + int ldb, float beta, float *C, int ldc, int mc, int nc) { + // init C + float32x4_t cv0 = vdupq_n_f32(0.0); + float32x4_t cv1 = vdupq_n_f32(0.0); + float32x4_t cv2 = vdupq_n_f32(0.0); + float32x4_t cv3 = vdupq_n_f32(0.0); + + float32x4_t av; + float32x4_t bv; + + float32x2_t av01; + float32x2_t av23; + + for (int p = 0; p < k; p += 1) { + av = vld1q_f32(a); + bv = vld1q_f32(b); + + av01 = vget_low_f32(av); + cv0 = vmlaq_lane_f32(cv0, bv, av01, 0); + cv1 = vmlaq_lane_f32(cv1, bv, av01, 1); + av23 = vget_high_f32(av); + cv2 = vmlaq_lane_f32(cv2, bv, av23, 0); + cv3 = vmlaq_lane_f32(cv3, bv, av23, 1); + + a += MR; + b += NR; + } + float32x4x4_t cv = {cv0, cv1, cv2, cv3}; + int i, j; + for (i = 0; i < mc; ++i) { + for (j = 0; j < nc; ++j) { + if (beta == 0.0) { + C(i, j) = 0.0; + } else if (beta != 1.0) { + C(i, j) *= beta; + } + if (j == 0) { + C(i, j) += alpha * vgetq_lane_f32(cv.val[i], 0); + } else if (j == 1) { + C(i, j) += alpha * vgetq_lane_f32(cv.val[i], 1); + } else if (j == 2) { + C(i, j) += alpha * vgetq_lane_f32(cv.val[i], 2); + } else if (j == 3) { + C(i, j) += alpha * vgetq_lane_f32(cv.val[i], 3); + } + } + } +} +#elif defined(ARMV7) +void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b, + int ldb, float beta, float *C, int ldc, int mc, int nc) { + int kc1 = k / 2, kc2 = k % 2; + int bytes_ldc = 4 * ldc; + int flag_alpha = (alpha == 1.0) ? 1 : 2; + int flag_beta; + if (beta == 0.0) { + flag_beta = 0; + } else if (beta == 1.0) { + flag_beta = 1; + } else { + flag_beta = 2; + } + asm volatile( + "vmov.f32 q10, #0.0 \n\t" + "vmov.f32 q11, #0.0 \n\t" + "vmov.f32 q12, #0.0 \n\t" + "vmov.f32 q13, #0.0 \n\t" + + "subs %[kc1], %[kc1], #1 \n\t" + "blt end_kc1_%= \n\t" + "loop_kc1_%=: \n\t" + "vld1.32 {q0, q1}, [%[a]]! \n\t" + "vld1.32 {q2, q3}, [%[b]]! \n\t" + "vmla.f32 q10, q2, d0[0] \n\t" + "vmla.f32 q11, q2, d0[1] \n\t" + "vmla.f32 q12, q2, d1[0] \n\t" + "vmla.f32 q13, q2, d1[1] \n\t" + "vmla.f32 q10, q3, d2[0] \n\t" + "vmla.f32 q11, q3, d2[1] \n\t" + "vmla.f32 q12, q3, d3[0] \n\t" + "vmla.f32 q13, q3, d3[1] \n\t" + "subs %[kc1], %[kc1], #1 \n\t" + "bge loop_kc1_%= \n\t" + "end_kc1_%=: \n\t" + + "subs %[kc2], %[kc2], #1 \n\t" + "blt end_kc2_%= \n\t" + "vld1.32 {q0}, [%[a]]! \n\t" + "vld1.32 {q1}, [%[b]]! 
\n\t" + "vmla.f32 q10, q1, d0[0] \n\t" + "vmla.f32 q11, q1, d0[1] \n\t" + "vmla.f32 q12, q1, d1[0] \n\t" + "vmla.f32 q13, q1, d1[1] \n\t" + "end_kc2_%=: \n\t" + + "cmp %[mc], #4 \n\t" + "bne temp_%= \n\t" + "cmp %[nc], #4 \n\t" + "bne temp_%= \n\t" + + "vmov.f32 d8[0], %[alpha] \n\t" + "vmov.f32 d8[1], %[beta] \n\t" + + "cmp %[flag_alpha], #1 \n\t" + "bne alpha_%= \n\t" + + "alpha_%=: \n\t" + "vmul.f32 q10, q10, d8[0] \n\t" + "vmul.f32 q11, q11, d8[0] \n\t" + "vmul.f32 q12, q12, d8[0] \n\t" + "vmul.f32 q13, q13, d8[0] \n\t" + + "beta_%=: \n\t" + "cmp %[flag_beta], #0 \n\t" + "beq memory_%= \n\t" + + "mov r4, %[C] \n\t" + "mov r6, %[bytes_ldc]\n\t" + "vld1.32 {q0}, [r4], r6 \n\t" + "vld1.32 {q1}, [r4], r6 \n\t" + "vld1.32 {q2}, [r4], r6 \n\t" + "vld1.32 {q3}, [r4] \n\t" + "cmp %[flag_beta], #1 \n\t" + "beq beta_eq1_%= \n\t" + "bne beta_ne1_%= \n\t" + + "beta_eq1_%=: \n\t" + "vadd.f32 q10, q10, q0 \n\t" + "vadd.f32 q11, q11, q1 \n\t" + "vadd.f32 q12, q12, q2 \n\t" + "vadd.f32 q13, q13, q3 \n\t" + "b memory_%= \n\t" + + "beta_ne1_%=: \n\t" + "vmla.f32 q10, q0, d8[1] \n\t" + "vmla.f32 q11, q1, d8[1] \n\t" + "vmla.f32 q12, q2, d8[1] \n\t" + "vmla.f32 q13, q3, d8[1] \n\t" + + "memory_%=: \n\t" + "mov r5, %[C] \n\t" + "mov r6, %[bytes_ldc]\n\t" + "vst1.32 {q10}, [r5], r6 \n\t" + "vst1.32 {q11}, [r5], r6 \n\t" + "vst1.32 {q12}, [r5], r6 \n\t" + "vst1.32 {q13}, [r5] \n\t" + "b end_%= \n\t" + + "temp_%=: \n\t" + "vst1.32 {q10, q11}, [%[ab]]!\n\t" + "vst1.32 {q12, q13}, [%[ab]] \n\t" + "end_%=: \n\t" + : + : [a] "r"(a), [b] "r"(b), [C] "r"(C), [ab] "r"(ab), [kc1] "r"(kc1), + [kc2] "r"(kc2), [mc] "r"(mc), [nc] "r"(nc), [alpha] "r"(alpha), + [beta] "r"(beta), [bytes_ldc] "r"(bytes_ldc), + [flag_alpha] "r"(flag_alpha), [flag_beta] "r"(flag_beta) + : "memory", "q0", "q1", "q2", "q3", "q4", "q10", "q11", "q12", "q13"); + + if (mc != MR || nc != NR) { + int i, j; + for (i = 0; i < mc; ++i) { + for (j = 0; j < nc; ++j) { + if (beta == 0.0) { + if (alpha != 1.0) { + C(i, j) = alpha * ab[i * MR + j]; + } else { + C(i, j) = ab[i * MR + j]; + } + } else { + if (beta != 1.0) { + C(i, j) *= beta; + } + if (alpha != 1.0) { + C(i, j) += alpha * ab[i * MR + j]; + } else { + C(i, j) += ab[i * MR + j]; + } + } + } + } + } +} +#else void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b, int ldb, float beta, float *C, int ldc, int mc, int nc) { float c[16] = {0}; float reg_a0, reg_a1, reg_a2, reg_a3, reg_b0, reg_b1, reg_b2, reg_b3; - // // init C - // float32x4_t cv0 = vdup_n_f32(0.0); - // float32x4_t cv1 = vdup_n_f32(0.0); - // float32x4_t cv2 = vdup_n_f32(0.0); - // float32x4_t cv3 = vdup_n_f32(0.0); - for (int p = 0; p < k; p += 1) { reg_b0 = *b++; reg_b1 = *b++; @@ -232,6 +416,7 @@ void AddDot4x4(int k, float alpha, const float *a, int lda, const float *b, } } } +#endif // 32位 float 矩阵乘法 void sgemm(int m, int n, int k, float alpha, const float *A, int lda, diff --git a/src/operators/math/pool3x3.h b/src/operators/math/pool3x3.h index 3852b901871eb4cdcff0497a1ad2854abf93b7b6..164958288de5cf3bb37dcb2d37c7fe08b7bd7a1a 100644 --- a/src/operators/math/pool3x3.h +++ b/src/operators/math/pool3x3.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef POOL_OP + #pragma once #if __ARM_NEON @@ -25,3 +27,5 @@ static void Pool3x3Max() { static void Pool3x3Avg() { // todo impl with neon } + +#endif diff --git a/src/operators/math/pool_2x2.h b/src/operators/math/pool_2x2.h index 0ed7f4e6abd4f7c78a9f14652fcf662a99d1e549..46e9e36470ceeee39563dc410e63a09aaec973bb 100644 --- a/src/operators/math/pool_2x2.h +++ b/src/operators/math/pool_2x2.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef POOL_OP + #pragma once #if __ARM_NEON @@ -25,3 +27,5 @@ static void Pool2x2Max() { static void Pool2x2Avg() { // todo impl with neon } + +#endif diff --git a/src/operators/math/pooling.cpp b/src/operators/math/pooling.cpp index 07afdb7d14a7260e547e072cc67bd1613e812944..0a823f2cc066e487bf1e3131105b28c0a44e44a4 100644 --- a/src/operators/math/pooling.cpp +++ b/src/operators/math/pooling.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef POOL_OP + #include "pooling.h" #include @@ -91,3 +93,5 @@ template class PoolFunctor, float>; } // namespace math } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/math/pooling.h b/src/operators/math/pooling.h index e511fc0518cb755d481b347df449d0e242a58e14..fc6aabb5f13fdedd9dfe9877748aa4d58b3afe36 100644 --- a/src/operators/math/pooling.h +++ b/src/operators/math/pooling.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef POOL_OP + #pragma once #include "common/log.h" @@ -64,3 +66,5 @@ class PoolFunctor { } } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/math/softmax.cpp b/src/operators/math/softmax.cpp index 224382eb2b78b1653da0cbbd9327cabb4fd9b3d1..a1eb4f13d82376d86da258101b15e6ae5e8bdc97 100644 --- a/src/operators/math/softmax.cpp +++ b/src/operators/math/softmax.cpp @@ -11,6 +11,9 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +#ifdef SOFTMAX_OP + #include "operators/math/softmax.h" #include "common/types.h" #if __ARM_NEON @@ -153,3 +156,4 @@ template class SoftmaxFuntor; } // namespace math } // namespace operators } // namespace paddle_mobile +#endif diff --git a/src/operators/math/softmax.h b/src/operators/math/softmax.h index 232497da531a44c14772916fa26328c4b3a1f130..e2ca8f30b067e9262a0e87f4ba5807df07949e73 100644 --- a/src/operators/math/softmax.h +++ b/src/operators/math/softmax.h @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef SOFTMAX_OP #pragma once #include "framework/tensor.h" namespace paddle_mobile { @@ -26,3 +27,4 @@ class SoftmaxFuntor { } // namespace math } // namespace operators } // namespace paddle_mobile +#endif diff --git a/src/operators/mul_op.cpp b/src/operators/mul_op.cpp index 80c20122f4b04a3de13a95bc8ed26d48f7464f44..2bd2e0694470518a0220ee020e689e358d70d702 100644 --- a/src/operators/mul_op.cpp +++ b/src/operators/mul_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef MUL_OP + #include "mul_op.h" namespace paddle_mobile { @@ -55,3 +57,5 @@ template class MulOp; namespace ops = paddle_mobile::operators; USE_OP(mul); REGISTER_OPERATOR(mul, ops::MulOp); + +#endif diff --git a/src/operators/mul_op.h b/src/operators/mul_op.h index ded618551fca682daea0bacc3635776eeb81301c..85c6c80b925d0be6507bea9a4262a0e6185324a7 100644 --- a/src/operators/mul_op.h +++ b/src/operators/mul_op.h @@ -11,6 +11,9 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +#ifdef MUL_OP + #pragma once #include @@ -45,3 +48,5 @@ class MulOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/multiclass_nms_op.cpp b/src/operators/multiclass_nms_op.cpp index bc796010b231929b3f0c017b68f33b861a84262d..1e4c3f8c34020eeeec2e59cb499b7e00c95edb38 100644 --- a/src/operators/multiclass_nms_op.cpp +++ b/src/operators/multiclass_nms_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef MULTICLASSNMS_OP + #include "operators/multiclass_nms_op.h" namespace paddle_mobile { namespace operators { @@ -39,3 +41,5 @@ template class MultiClassNMSOp; namespace ops = paddle_mobile::operators; USE_OP(multiclass_nms); REGISTER_OPERATOR(multiclass_nms, ops::MultiClassNMSOp); + +#endif diff --git a/src/operators/multiclass_nms_op.h b/src/operators/multiclass_nms_op.h index c424856b8cdc09b365a7ece28df39a911b6d3af8..78d6ec31204b4a103f59f99cb1aafd5a16ea985a 100644 --- a/src/operators/multiclass_nms_op.h +++ b/src/operators/multiclass_nms_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef MULTICLASSNMS_OP + #pragma once #include @@ -50,3 +52,5 @@ class MultiClassNMSOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/op_param.cpp b/src/operators/op_param.cpp index ac6ae4cdef77af623097bf6a6d1e73f55339a71a..dc8efa4b5ada6aafdcfefc9203aab955c15f7b06 100644 --- a/src/operators/op_param.cpp +++ b/src/operators/op_param.cpp @@ -12,10 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "op_param.h" +#include "op_param.h" namespace paddle_mobile { namespace operators { + +#ifdef CONV_OP Print &operator<<(Print &printer, const ConvParam &conv_param) { printer << "parameter of conv: " << "\n"; @@ -36,5 +38,7 @@ Print &operator<<(Print &printer, const ConvParam &conv_param) { printer << " output dims: " << conv_param.Output()->dims(); return printer; } +#endif + } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/op_param.h b/src/operators/op_param.h index 11f619346eed9f3b4309ec983de6f0b09f1142b7..22890c4453d8a91b21bc7848a87d8159be804d90 100644 --- a/src/operators/op_param.h +++ b/src/operators/op_param.h @@ -191,6 +191,7 @@ class OpParam { } }; +#ifdef CONV_OP class ConvParam : OpParam { public: ConvParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -230,7 +231,9 @@ class ConvParam : OpParam { }; Print &operator<<(Print &printer, const ConvParam &conv_param); +#endif +#ifdef ELEMENTWISEADD_OP class ElementwiseAddParam : OpParam { public: ElementwiseAddParam(const VariableNameMap &inputs, @@ -258,6 +261,9 @@ class ElementwiseAddParam : OpParam { int axis_; }; +#endif + +#ifdef MUL_OP class MulParam : OpParam { public: MulParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -287,7 +293,9 @@ class MulParam : OpParam { int x_num_col_dims_; int y_num_col_dims_; }; +#endif +#ifdef CONCAT_OP class ConcatParam : public OpParam { public: ConcatParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -309,7 +317,9 @@ class ConcatParam : public OpParam { Tensor *out_; int axis_; }; +#endif +#ifdef LRN_OP class LrnParam : public OpParam { public: LrnParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -351,6 +361,9 @@ class LrnParam : public OpParam { float k_; string data_format_; }; +#endif + +#ifdef BATCHNORM_OP class BatchNormParam : OpParam { public: BatchNormParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -399,6 +412,9 @@ class BatchNormParam : OpParam { bool is_test_; string data_format_; }; +#endif + +#ifdef POOL_OP class PoolParam : public OpParam { public: PoolParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -442,6 +458,9 @@ class PoolParam : public OpParam { bool gloabal_pooling_ = false; }; +#endif + +#ifdef PRIORBOX_OP class PriorBoxParam : public OpParam { public: PriorBoxParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -503,7 +522,9 @@ class PriorBoxParam : public OpParam { float step_h_; float offset_; }; +#endif +#ifdef BOXCODER_OP class BoxCoderParam : public OpParam { public: BoxCoderParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -533,7 +554,9 @@ class BoxCoderParam : public OpParam { Tensor *output_box_; std::string code_type_; }; +#endif +#ifdef SOFTMAX_OP class SoftmaxParam : public OpParam { public: SoftmaxParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -549,7 +572,9 @@ class SoftmaxParam : public OpParam { Tensor *input_x_; Tensor *out_; }; +#endif +#ifdef SIGMOID_OP class SigmoidParam : public OpParam { public: SigmoidParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -565,6 +590,9 @@ class SigmoidParam : public OpParam { Tensor *input_x_; Tensor *out_; }; +#endif + +#ifdef MULTICLASSNMS_OP class MultiClassNMSParam : public OpParam { public: MultiClassNMSParam(const VariableNameMap &inputs, @@ -610,6 +638,7 @@ class MultiClassNMSParam : public OpParam { float nms_eta_; float score_threshold_; }; +#endif class 
FeedParam : public OpParam { public: @@ -646,6 +675,7 @@ class FetchParam : public OpParam { Tensor *out_; }; +#ifdef TRANSPOSE_OP class TransposeParam : public OpParam { public: TransposeParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -666,7 +696,9 @@ class TransposeParam : public OpParam { Tensor *out_; vector axis_; }; +#endif +#ifdef RESHAPE_OP class ReshapeParam : public OpParam { public: ReshapeParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -695,7 +727,9 @@ class ReshapeParam : public OpParam { vector shape_; bool inplace_; }; +#endif +#ifdef RELU_OP /* * @b op 层实例化好这个 param 传递给 kernel 层使用 * */ @@ -715,7 +749,9 @@ class ReluParam : public OpParam { Tensor *input_x_; Tensor *out_; }; +#endif +#ifdef FUSION_FC_OP class FushionFcParam : public OpParam { public: FushionFcParam(const VariableNameMap &inputs, const VariableNameMap &outputs, @@ -751,6 +787,7 @@ class FushionFcParam : public OpParam { int y_num_col_dims_; int axis_; }; +#endif } // namespace operators } // namespace paddle_mobile diff --git a/src/operators/pool_op.cpp b/src/operators/pool_op.cpp index 3096199dc3e3157f9fa0048ad35f796e24113f28..e1c5b5ada3478fe35eb989a262815917205b5063 100644 --- a/src/operators/pool_op.cpp +++ b/src/operators/pool_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef POOL_OP + #include "pool_op.h" namespace paddle_mobile { @@ -57,3 +59,5 @@ template class PoolOp; namespace ops = paddle_mobile::operators; USE_OP(pool2d); REGISTER_OPERATOR(pool2d, ops::PoolOp); + +#endif diff --git a/src/operators/pool_op.h b/src/operators/pool_op.h index ff44771c56151acf699b017ddf834a2d32e07761..9ad0bd3e3b95503c53bbeed6a8bca7fdb48ac23e 100644 --- a/src/operators/pool_op.h +++ b/src/operators/pool_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef POOL_OP + #pragma once #include @@ -47,3 +49,5 @@ class PoolOp : public OperatorWithKernel { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/prior_box_op.cpp b/src/operators/prior_box_op.cpp index 3928c3db53414dbb3ef9a6ae4ebe5527dc5eeeca..22f9326b00f41a96de2f6ce3d79f8cbee98fd9f4 100644 --- a/src/operators/prior_box_op.cpp +++ b/src/operators/prior_box_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef PRIORBOX_OP + #include "operators/prior_box_op.h" #include namespace paddle_mobile { @@ -49,3 +51,5 @@ template class PriorBoxOp; namespace ops = paddle_mobile::operators; USE_OP(prior_box); REGISTER_OPERATOR(prior_box, ops::PriorBoxOp); + +#endif diff --git a/src/operators/prior_box_op.h b/src/operators/prior_box_op.h index 84481e602a6cb4143a50760e66b0d430b8a1c719..55080f3c5a77683acc5ee76fc6ab91545004d010 100644 --- a/src/operators/prior_box_op.h +++ b/src/operators/prior_box_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef PRIORBOX_OP + #pragma once #include @@ -50,3 +52,5 @@ class PriorBoxOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/relu_op.cpp b/src/operators/relu_op.cpp index 21bcc605282ffc590025e87b609cccc855a631d1..3beac260935ce2daf8a5b9f1e6b9be178034ac8d 100644 --- a/src/operators/relu_op.cpp +++ b/src/operators/relu_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef RELU_OP + #include "operators/relu_op.h" namespace paddle_mobile { namespace operators { @@ -33,3 +35,5 @@ template class ReluOp; namespace ops = paddle_mobile::operators; USE_OP(relu); REGISTER_OPERATOR(relu, ops::ReluOp); + +#endif diff --git a/src/operators/relu_op.h b/src/operators/relu_op.h index 7be8cd249cb22255dff237da6c8653e6237bbc3f..e52ef5edd2013e30c5004b629c06aa1affe1d20e 100644 --- a/src/operators/relu_op.h +++ b/src/operators/relu_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef RELU_OP + #pragma once #include @@ -59,3 +61,5 @@ class ReluOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/reshape_op.cpp b/src/operators/reshape_op.cpp index 6562b7a5eb491a7e69e9bd9481251b8aaf9f3f4b..44d3de2203cc01f6a6acd6810f4e676f6efb6bbd 100644 --- a/src/operators/reshape_op.cpp +++ b/src/operators/reshape_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef RESHAPE_OP + #include "operators/reshape_op.h" #include namespace paddle_mobile { @@ -32,3 +34,5 @@ template class ReshapeOp; namespace ops = paddle_mobile::operators; USE_OP(reshape); REGISTER_OPERATOR(reshape, ops::ReshapeOp); + +#endif diff --git a/src/operators/reshape_op.h b/src/operators/reshape_op.h index b244e62a930a0e6a98d56fe06a4e4a7e37f7d5e1..ce106125cb6a5b48c3bc9c03e20c7dbec90c90c0 100644 --- a/src/operators/reshape_op.h +++ b/src/operators/reshape_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef RESHAPE_OP + #pragma once #include @@ -49,3 +51,5 @@ class ReshapeOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/sigmoid_op.cpp b/src/operators/sigmoid_op.cpp index 6bff80a35aa019a7b05f6e9b58c49e13fb8f1bc8..8be9309d1047a1d892c0c0151375a8baa01cbca3 100644 --- a/src/operators/sigmoid_op.cpp +++ b/src/operators/sigmoid_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef SIGMOID_OP + #include "operators/sigmoid_op.h" namespace paddle_mobile { @@ -27,3 +29,5 @@ template class SigmoidOp; namespace ops = paddle_mobile::operators; USE_OP(sigmoid); REGISTER_OPERATOR(sigmoid, ops::SigmoidOp); + +#endif diff --git a/src/operators/sigmoid_op.h b/src/operators/sigmoid_op.h index f631ba51759ea31f91ddcdf7c90a0dc874e86b20..3757e2de168e75764b9d8d1d249fe3d8c87d817a 100644 --- a/src/operators/sigmoid_op.h +++ b/src/operators/sigmoid_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef SIGMOID_OP + #pragma once #include @@ -47,3 +49,5 @@ class SigmoidOp : public framework::OperatorWithKernel { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/softmax_op.cpp b/src/operators/softmax_op.cpp index c353d0b882cb8f0682f9e4710ff05c32ca68e685..5973647bfd1624fc4bb71b8112c5d7f8bf9665cd 100644 --- a/src/operators/softmax_op.cpp +++ b/src/operators/softmax_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef SOFTMAX_OP + #include "operators/softmax_op.h" namespace paddle_mobile { @@ -27,3 +29,5 @@ template class SoftmaxOp; namespace ops = paddle_mobile::operators; USE_OP(softmax); REGISTER_OPERATOR(softmax, ops::SoftmaxOp); + +#endif diff --git a/src/operators/softmax_op.h b/src/operators/softmax_op.h index 07fd9b945cb29cecd6f4d629b6be58035f971ce4..1c764248cb72f84adb7665ced0a4375a3cd79624 100644 --- a/src/operators/softmax_op.h +++ b/src/operators/softmax_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef SOFTMAX_OP + #pragma once #include @@ -47,3 +49,5 @@ class SoftmaxOp : public framework::OperatorWithKernel { }; } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/src/operators/transpose_op.cpp b/src/operators/transpose_op.cpp index e21338bf1b59981e914ca4a8e1781e02254bc00c..c8e16f9e4b42037eee84dbe5cd023b67e781f2a5 100644 --- a/src/operators/transpose_op.cpp +++ b/src/operators/transpose_op.cpp @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifdef TRANSPOSE_OP + #include "operators/transpose_op.h" #include #include @@ -51,3 +53,5 @@ template class TransposeOp; namespace ops = paddle_mobile::operators; USE_OP(transpose); REGISTER_OPERATOR(transpose, ops::TransposeOp); + +#endif diff --git a/src/operators/transpose_op.h b/src/operators/transpose_op.h index 0f67339533261f98374c6257494278306f3a7208..728c6991c014fcf37b9c0b4f467ccc1c4999883b 100644 --- a/src/operators/transpose_op.h +++ b/src/operators/transpose_op.h @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef TRANSPOSE_OP + #pragma once #include @@ -50,3 +52,5 @@ class TransposeOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle_mobile + +#endif diff --git a/test/common/test_gemm.cpp.cpp b/test/common/test_gemm.cpp similarity index 100% rename from test/common/test_gemm.cpp.cpp rename to test/common/test_gemm.cpp diff --git a/test/framework/test_load.cpp b/test/framework/test_load.cpp index 45d7324bf5a694a7f08d803b32aeec9f0b2ca30f..a1f092c4d81ee09d3327a0e2d5425e20dad1ae89 100644 --- a/test/framework/test_load.cpp +++ b/test/framework/test_load.cpp @@ -19,7 +19,7 @@ int main() { paddle_mobile::Loader loader; // ../../../test/models/googlenet // ../../../test/models/mobilenet - auto program = loader.Load(g_mobilenet_ssd, true); + auto program = loader.Load(g_resnet, true); program.originProgram->Description("program desc: "); return 0; } diff --git a/scripts/push2android.sh b/tools/scripts/push2android.sh similarity index 78% rename from scripts/push2android.sh rename to tools/scripts/push2android.sh index ef00f58a96097f0596f3aab0fc8287541096ee5b..9b69e28b8afd675b36f96b7502bbcc749122d761 100644 --- a/scripts/push2android.sh +++ b/tools/scripts/push2android.sh @@ -1,9 +1,9 @@ #!/usr/bin/env sh push_fn () { -MODELS_PATH="../test/models/*" -IMAGE_PATH="../test/images/*" -EXE_FILE="../test/build/*" +MODELS_PATH="../../test/models/*" +IMAGE_PATH="../../test/images/*" +EXE_FILE="../../test/build/*" EXE_DIR="data/local/tmp/bin" MODELS_DIR="data/local/tmp/models" IMAGES_DIR="data/local/tmp/images"
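Reference note (not part of the patch): the new AddDot4x4 variants in src/operators/math/gemm.cpp (NEON intrinsics under IOS, inline assembly under ARMV7, plain C otherwise) all compute the same 4x4 micro-kernel over packed panels of A and B. The stand-alone scalar sketch below restates that contract for readers who do not want to decode the assembly. It assumes MR == NR == 4, assumes the C(i, j) macro maps to C[i * ldc + j], and omits the lda/ldb parameters that the packed-panel loops above do not use, so treat it as an illustration rather than the project's implementation.

// add_dot_4x4_ref.cpp -- scalar reference for the 4x4 GEMM micro-kernel.
// 'a' is a packed column panel of A (MR floats per k step) and 'b' is a
// packed row panel of B (NR floats per k step), as produced by PackMatrixA
// and PackMatrixB.
#include <cstdio>

constexpr int MR = 4;
constexpr int NR = 4;

void AddDot4x4Ref(int k, float alpha, const float *a, const float *b,
                  float beta, float *C, int ldc, int mc, int nc) {
  float acc[MR * NR] = {0};

  // Accumulate the 4x4 outer products over the shared dimension k.
  for (int p = 0; p < k; ++p) {
    for (int i = 0; i < MR; ++i) {
      for (int j = 0; j < NR; ++j) {
        acc[i * NR + j] += a[i] * b[j];
      }
    }
    a += MR;  // next packed column of A
    b += NR;  // next packed row of B
  }

  // Write back only the valid mc x nc corner (edge blocks may be smaller),
  // applying C = beta * C + alpha * acc as the vectorized kernels do.
  for (int i = 0; i < mc; ++i) {
    for (int j = 0; j < nc; ++j) {
      float c0 = (beta == 0.0f) ? 0.0f : beta * C[i * ldc + j];
      C[i * ldc + j] = c0 + alpha * acc[i * NR + j];
    }
  }
}

int main() {
  // Toy case: k = 2, A panel = two packed columns, B panel = two packed rows.
  float a[MR * 2] = {1, 2, 3, 4, 1, 2, 3, 4};
  float b[NR * 2] = {1, 1, 1, 1, 2, 2, 2, 2};
  float C[MR * NR] = {0};
  AddDot4x4Ref(2, 1.0f, a, b, 0.0f, C, NR, MR, NR);
  printf("C[0][0] = %.1f\n", C[0][0]);  // 1*1 + 1*2 = 3.0
  return 0;
}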
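Reference note (not part of the patch): each operator and kernel file above is now wrapped in an #ifdef <NAME>_OP / #endif pair, so its registration (USE_OP / REGISTER_OPERATOR) is only compiled when the matching macro is defined by the build. The self-contained snippet below demonstrates that mechanism with the RELU_OP macro; the Relu function here is a placeholder for illustration, not the project's kernel.

// op_guard_demo.cpp -- minimal illustration of the per-operator compile guard.
// Code wrapped in #ifdef RELU_OP only exists in builds that define the macro
// (for example, compiling with -DRELU_OP).
#include <cstdio>

#ifdef RELU_OP
float Relu(float x) { return x > 0.0f ? x : 0.0f; }
#endif

int main() {
#ifdef RELU_OP
  printf("relu(-2) = %.1f\n", Relu(-2.0f));  // present only with -DRELU_OP
#else
  printf("relu op not compiled into this binary\n");
#endif
  return 0;
}

Building with g++ -DRELU_OP op_guard_demo.cpp keeps the operator; building without the define produces a binary that does not contain it, which is how a build can strip out operators it does not need.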