From 6fd788f75ac26c4d02d44288b2d3b4f74751c249 Mon Sep 17 00:00:00 2001
From: hjchen2
Date: Thu, 28 Mar 2019 22:36:44 +0800
Subject: [PATCH] Replace C++ typeid with a self-defined type_id to avoid
 implementation differences

---
 src/common/dep_core.h                       | 67 -------------
 src/common/macros.h                         | 17 ----
 src/common/type_define.h                    | 94 +++++++++++++++----
 src/common/types.h                          |  3 +
 src/common/variant.h                        | 61 ++++++------
 src/framework/attribute.h                   | 24 ++---
 src/framework/cl/cl_tensor.h                | 16 ++--
 src/framework/data_type.cpp                 | 21 ++---
 src/framework/data_type.h                   |  5 +-
 src/framework/ddim.h                        | 22 ++---
 src/framework/executor.cpp                  | 34 +------
 src/framework/lod_tensor.cpp                | 42 +-------
 src/framework/lod_tensor.h                  | 12 +--
 src/framework/mixed_vector.h                |  3 +-
 src/framework/op_info.h                     | 13 ++-
 src/framework/operator.h                    |  1 +
 src/framework/program/op_desc.h             |  3 +-
 src/framework/program/program_desc.cpp      |  2 +-
 src/framework/tensor.h                      | 36 ++++---
 src/framework/tensor_base.h                 | 23 +++--
 src/framework/variable.h                    | 32 +++----
 src/io/api_paddle_mobile.cc                 |  4 +-
 src/io/paddle_inference_api.h               |  2 -
 src/operators/beam_search_decode_op.cpp     |  2 -
 src/operators/beam_search_op.cpp            |  2 -
 src/operators/kernel/arm/compare_kernel.cpp |  4 +-
 src/operators/kernel/arm/concat_kernel.cpp  |  2 +-
 .../kernel/arm/convolution/conv_common.cpp  |  2 +-
 .../arm/tensor_array_read_write_kernel.cpp  |  4 +-
 .../kernel/arm/transpose2_kernel.cpp        |  4 +-
 src/operators/kernel/arm/while_kernel.cpp   |  1 +
 .../kernel/central-arm-func/mul_arm_func.h  |  2 +-
 .../kernel/central-arm-func/sum_arm_func.h  |  3 +-
 src/operators/math/gemm/cblas.cc            |  2 -
 src/operators/one_hot_op.cpp                |  2 -
 src/operators/op_param.h                    |  1 +
 36 files changed, 236 insertions(+), 332 deletions(-)
 delete mode 100644 src/common/dep_core.h
 delete mode 100644 src/common/macros.h
 mode change 100755 => 100644 src/common/types.h

diff --git a/src/common/dep_core.h b/src/common/dep_core.h
deleted file mode 100644
index a9fdca5b1d..0000000000
--- a/src/common/dep_core.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#ifdef PADDLE_EXECUTOR_MULTITHREAD
-#include <algorithm>
-#include <unordered_map>
-#include <vector>
-#include "framework/operator.h"
-
-namespace paddle_mobile {
-
-class depCore {
- public:
-  template <typename Dtype>
-  void analysisDep(
-      const std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>& ops) {
-    std::unordered_map<std::string, int> vars;
-    size_t nop = ops.size();
-    deps.resize(nop);
-    next.resize(nop);
-    for (size_t i = 0; i < nop; i++) {
-      const auto& op = ops[i];
-      for (const auto& kv : op->Inputs()) {
-        for (const auto& v : kv.second) {
-          if (vars.find(v) == vars.end()) {
-            continue;
-          }
-          int di = vars[v];
-          if (di == i) {
-            continue;
-          }
-          if (std::find(deps[i].begin(), deps[i].end(), di) != deps[i].end()) {
-            continue;
-          }
-          deps[i].push_back(di);
-          next[di].push_back(i);
-        }
-      }
-      for (const auto& kv : op->Outputs()) {
-        for (const auto& v : kv.second) {
-          vars[v] = i;
-        }
-      }
-    }
-  }
-  const std::vector<int>& getNext(int i) { return next[i]; }
-  const std::vector<int>& getDeps(int i) { return deps[i]; }
-  std::vector<std::vector<int>> deps;
-  std::vector<std::vector<int>> next;
-};
-
-}  // namespace paddle_mobile
-
-#endif
diff --git a/src/common/macros.h b/src/common/macros.h
deleted file mode 100644
index ee38f19c92..0000000000
--- a/src/common/macros.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#define EXPORT __attribute__((visibility("default")))
diff --git a/src/common/type_define.h b/src/common/type_define.h
index a25a19f11f..e81863f9fb 100644
--- a/src/common/type_define.h
+++ b/src/common/type_define.h
@@ -14,33 +14,91 @@ limitations under the License. */
 
 #pragma once
 
-#include <functional>
-#include <map>
 #include <string>
 #include <vector>
-#include "framework/attribute.h"
-#include "framework/scope.h"
 
 namespace paddle_mobile {
 
+template <typename T>
+struct TypeIdWrapper {
+  std::string type();
+};
+
+template <typename T>
+struct type_id {
+  const std::string type_ = TypeIdWrapper<T>().type();
+
+  template <typename OtherType>
+  bool operator==(const type_id<OtherType> &operand) {
+    return this->name() == operand.name();
+  }
+
+  const std::string name() { return type_; }
+};
+
 namespace framework {
-template <typename Dtype>
-class OperatorBase;
-class OpDesc;
 class BlockDesc;
-class InferShapeContext;
+class Tensor;
+class LoDTensor;
+class SelectedRows;
+class Scope;
+
+template <int i>
+struct Dim;
 }  // namespace framework
 
-using VariableNameMap = std::map<std::string, std::vector<std::string>>;
+#define REGISTER_TYPE_ID(Type, TypeName)                    \
+  template <>                                               \
+  struct TypeIdWrapper<Type> {                              \
+    std::string type() { return std::string(#TypeName); }   \
+  };
+
+REGISTER_TYPE_ID(void, _void)
+REGISTER_TYPE_ID(float, _float)
+REGISTER_TYPE_ID(int, _int)
+REGISTER_TYPE_ID(double, _double)
+REGISTER_TYPE_ID(int64_t, _int64_t)
+REGISTER_TYPE_ID(size_t, _size_t)
+REGISTER_TYPE_ID(int16_t, _int16_t)
+REGISTER_TYPE_ID(int8_t, _int8_t)
+REGISTER_TYPE_ID(uint8_t, _uint8_t)
+REGISTER_TYPE_ID(bool, _bool)
+REGISTER_TYPE_ID(std::string, _string)
+REGISTER_TYPE_ID(std::vector<float>, _floats)
+REGISTER_TYPE_ID(std::vector<int>, _ints)
+REGISTER_TYPE_ID(std::vector<int64_t>, _int64_ts)
+REGISTER_TYPE_ID(std::vector<size_t>, _size_ts)
+REGISTER_TYPE_ID(std::vector<bool>, _bools)
+REGISTER_TYPE_ID(std::vector<std::string>, _strings)
+
+REGISTER_TYPE_ID(float const, _const_float)
+REGISTER_TYPE_ID(int const, _const_int)
+
+REGISTER_TYPE_ID(framework::BlockDesc, _block)
+REGISTER_TYPE_ID(framework::Tensor, _tensor)
+REGISTER_TYPE_ID(framework::LoDTensor, _lod_tensor)
+REGISTER_TYPE_ID(std::vector<framework::BlockDesc>, _blocks)
+REGISTER_TYPE_ID(std::vector<framework::Tensor>, _tensors)
+REGISTER_TYPE_ID(std::vector<framework::LoDTensor>, _lod_tensors)
 
-template <typename Dtype>
-using OpCreator = std::function<framework::OperatorBase<Dtype> *(
-    const std::string & /*type*/, const VariableNameMap & /*inputs*/,
-    const VariableNameMap & /*outputs*/,
-    const framework::AttributeMap & /*attrs*/, framework::Scope * /*scope*/)>;
+REGISTER_TYPE_ID(framework::BlockDesc *, _p_block)
+REGISTER_TYPE_ID(framework::Tensor *, _p_tensor)
+REGISTER_TYPE_ID(framework::LoDTensor *, _p_lod_tensor)
+REGISTER_TYPE_ID(std::vector<framework::BlockDesc *>, _p_blocks)
+REGISTER_TYPE_ID(std::vector<framework::Tensor *>, _p_tensors)
+REGISTER_TYPE_ID(std::vector<framework::LoDTensor *>, _p_lod_tensors)
 
-using InferVarTypeFN = std::function<void(const OpDesc & /*op_desc*/,
-                                          BlockDesc * /*block*/)>;
+REGISTER_TYPE_ID(std::vector<framework::Scope *>, _scopes);
+REGISTER_TYPE_ID(framework::SelectedRows, _selected_rows)
+REGISTER_TYPE_ID(framework::Dim<0>, _dim0)
+REGISTER_TYPE_ID(framework::Dim<1>, _dim1)
+REGISTER_TYPE_ID(framework::Dim<2>, _dim2)
+REGISTER_TYPE_ID(framework::Dim<3>, _dim3)
+REGISTER_TYPE_ID(framework::Dim<4>, _dim4)
+REGISTER_TYPE_ID(framework::Dim<5>, _dim5)
+REGISTER_TYPE_ID(framework::Dim<6>, _dim6)
+REGISTER_TYPE_ID(framework::Dim<7>, _dim7)
+REGISTER_TYPE_ID(framework::Dim<8>, _dim8)
+REGISTER_TYPE_ID(framework::Dim<9>, _dim9)
 
-using InferShapeFN = std::function<void(InferShapeContext *)>;
-};  // namespace paddle_mobile
+}  // namespace paddle_mobile
diff --git a/src/common/types.h b/src/common/types.h
old mode 100755
new mode 100644
index 35c1659c5a..f8e1fd26ea
--- a/src/common/types.h
+++ b/src/common/types.h
@@ -14,6 +14,7 @@ limitations under the License. */
 
 #pragma once
 
+#include <map>
 #include <string>
 #include <unordered_map>
 #include <utility>
@@ -211,4 +212,6 @@ extern std::unordered_map<
     std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
     op_input_output_key;
 
+typedef std::map<std::string, std::vector<std::string>> VariableNameMap;
+
 }  // namespace paddle_mobile
diff --git a/src/common/variant.h b/src/common/variant.h
index 4aa4f47c62..5c8e053406 100644
--- a/src/common/variant.h
+++ b/src/common/variant.h
@@ -19,6 +19,7 @@ limitations under the License. */
 #include <string>
 #include "common/enforce.h"
 #include "common/log.h"
+#include "common/type_define.h"
 
 namespace paddle_mobile {
 
@@ -33,11 +34,11 @@ struct VariantHelper {
           ? sizeof(F)
           : VariantHelper<Ts...>::size;
 
-  inline static void Destroy(size_t id, void *data) {
-    if (id == typeid(F).hash_code()) {
+  inline static void Destroy(std::string type, void *data) {
+    if (type == type_id<F>().name()) {
       reinterpret_cast<F *>(data)->~F();
     } else {
-      VariantHelper<Ts...>::Destroy(id, data);
+      VariantHelper<Ts...>::Destroy(type, data);
     }
   }
 };
@@ -45,11 +46,11 @@ struct VariantHelper {
 template <typename F>
 struct VariantHelper<F> {
   static const size_t size = sizeof(F);
-  inline static void Destroy(size_t id, void *data) {
-    if (id == typeid(F).hash_code()) {
-      //      reinterpret_cast<F*>(data)->~F();
+  inline static void Destroy(std::string type, void *data) {
+    if (type == type_id<F>().name()) {
+      //      reinterpret_cast<F*>(data)->~F();
     } else {
-      //      std::cout << "未匹配到 " << std::endl;
+      //      std::cout << "no match found" << std::endl;
     }
   }
 };
@@ -57,7 +58,7 @@ struct VariantHelper {
 template <size_t size>
 class RawData {
  public:
-  char data[size];
+  char data[size];  // NOLINT
   RawData() {}
   RawData(const RawData &raw_data) { memcpy(data, raw_data.data, size); }
 
@@ -69,32 +70,33 @@
 
 template <typename... Ts>
 struct Variant {
+  Variant() : type_(invalid_type()) {}
+
   Variant(const Variant &variant) {
-    type_id = variant.type_id;
-    data = variant.data;
+    type_ = variant.type_;
+    data_ = variant.data_;
   }
 
-  Variant() : type_id(invalid_type()) {}
-  ~Variant() {
-    //    helper::Destroy(type_id, &data);
+  virtual ~Variant() {
+    //    helper::Destroy(type_id, &data);
   }
 
   template <typename T, typename... Args>
   void Set(Args &&... args) {
-    helper::Destroy(type_id, data.data);
-    new (data.data) T(std::forward<Args>(args)...);
-    type_id = typeid(T).hash_code();
+    helper::Destroy(type_, data_.data);
+    new (data_.data) T(std::forward<Args>(args)...);
+    type_ = type_id<T>().name();
  }
 
-  void SetString(std::string &string) {
-    helper::Destroy(type_id, data.data);
-    type_id = typeid(std::string).hash_code();
-    strcpy(data.data, string.c_str());
+  void SetString(const std::string &string) {
+    helper::Destroy(type_, data_.data);
+    type_ = type_id<std::string>().name();
+    strcpy(data_.data, string.c_str());  // NOLINT
  }
 
   std::string GetString() const {
-    if (type_id == typeid(std::string).hash_code()) {
-      return std::string(data.data);
+    if (type_ == type_id<std::string>().name()) {
+      return std::string(data_.data);
     } else {
       PADDLE_MOBILE_THROW_EXCEPTION(
           " bad cast in variant data type not a string ");
@@ -104,28 +106,25 @@ struct Variant {
 
   template <typename T>
   T &Get() const {
-    if (type_id == typeid(std::string).hash_code()) {
+    if (type_ == type_id<std::string>().name()) {
       PADDLE_MOBILE_THROW_EXCEPTION(
           "Please use getString to get an string (to avoid of an issue with "
           "gcc "
           "stl lib with string copy)");
       exit(0);
-    } else if (type_id == typeid(T).hash_code()) {
-      return *const_cast<T *>(reinterpret_cast<const T *>(data.data));
     } else {
-      PADDLE_MOBILE_THROW_EXCEPTION(" bad cast in variant");
-      exit(0);
+      return *const_cast<T *>(reinterpret_cast<const T *>(data_.data));
     }
   }
 
-  size_t TypeId() const { return type_id; }
+  std::string TypeId() const { return type_; }
 
  private:
-  static inline size_t invalid_type() { return typeid(void).hash_code(); }
+  static inline std::string invalid_type() { return type_id<void>().name(); }
   typedef VariantHelper<Ts...> helper;
-  size_t type_id;
+  std::string type_ = type_id<void>().name();
   // TODO: use an auto size to suit this.
-  RawData<64> data;
+  RawData<64> data_;
 };
 
 template <typename T>
diff --git a/src/framework/attribute.h b/src/framework/attribute.h
index e00cee09b3..d809ec4a72 100644
--- a/src/framework/attribute.h
+++ b/src/framework/attribute.h
@@ -128,31 +128,31 @@ class Attribute {
   template <typename Vistor>
   static typename Vistor::type_t ApplyVistor(Vistor vistor, Attribute attr) {
-    if (attr.variant_.TypeId() == typeid(int).hash_code()) {  // NOLINT
+    if (attr.variant_.TypeId() == type_id<int>().name()) {  // NOLINT
       return vistor(attr.variant_.Get<int>());
-    } else if (attr.variant_.TypeId() == typeid(float).hash_code()) {  // NOLINT
+    } else if (attr.variant_.TypeId() == type_id<float>().name()) {  // NOLINT
       return vistor(attr.variant_.Get<float>());
-    } else if (attr.variant_.TypeId() == typeid(string).hash_code()) {
+    } else if (attr.variant_.TypeId() == type_id<string>().name()) {
       return vistor(attr.variant_.GetString());
-    } else if (attr.variant_.TypeId() == typeid(vector<string>).hash_code()) {
+    } else if (attr.variant_.TypeId() == type_id<vector<string>>().name()) {
       return vistor(attr.variant_.Get<vector<string>>());
-    } else if (attr.variant_.TypeId() == typeid(vector<float>).hash_code()) {
+    } else if (attr.variant_.TypeId() == type_id<vector<float>>().name()) {
       return vistor(attr.variant_.Get<vector<float>>());
-    } else if (attr.variant_.TypeId() == typeid(vector<int>).hash_code()) {
+    } else if (attr.variant_.TypeId() == type_id<vector<int>>().name()) {
       return vistor(attr.variant_.Get<vector<int>>());
-    } else if (attr.variant_.TypeId() == typeid(bool).hash_code()) {  // NOLINT
+    } else if (attr.variant_.TypeId() == type_id<bool>().name()) {  // NOLINT
       return vistor(attr.variant_.Get<bool>());
-    } else if (attr.variant_.TypeId() == typeid(vector<bool>).hash_code()) {
+    } else if (attr.variant_.TypeId() == type_id<vector<bool>>().name()) {
       return vistor(attr.variant_.Get<vector<bool>>());
-    } else if (attr.variant_.TypeId() == typeid(int64_t).hash_code()) {
+    } else if
(attr.variant_.TypeId() == type_id().name()) { return vistor(attr.variant_.Get()); } else if (attr.variant_.TypeId() == - typeid(framework::BlockDesc *).hash_code()) { + type_id().name()) { return vistor(attr.variant_.Get()); } else if (attr.variant_.TypeId() == - typeid(vector).hash_code()) { + type_id>().name()) { return vistor(attr.variant_.Get>()); - } else if (attr.variant_.TypeId() == typeid(vector).hash_code()) { + } else if (attr.variant_.TypeId() == type_id>().name()) { return vistor(attr.variant_.Get>()); } else { PADDLE_MOBILE_THROW_EXCEPTION("type not support"); diff --git a/src/framework/cl/cl_tensor.h b/src/framework/cl/cl_tensor.h index 93a3648bfe..2891399464 100644 --- a/src/framework/cl/cl_tensor.h +++ b/src/framework/cl/cl_tensor.h @@ -53,12 +53,12 @@ class CLTensor : TensorBase { int64_t size = numel() * sizeof(T); holder_.reset(new PlaceholderImpl( - size, reinterpret_cast(const_cast(data)), typeid(T), + size, reinterpret_cast(const_cast(data)), type_id(), context_, command_queue_)); return reinterpret_cast(holder_->ptr()); } - inline cl_mem mutable_data(std::type_index type) { + inline cl_mem mutable_data(std::string type) { if (holder_ != nullptr) { holder_->set_type(type); } @@ -77,7 +77,7 @@ class CLTensor : TensorBase { */ template inline cl_mem mutable_data() { - return reinterpret_cast(mutable_data(typeid(T))); + return reinterpret_cast(mutable_data(type_id())); } /** @@ -132,7 +132,7 @@ class CLTensor : TensorBase { void *host_ptr_ = nullptr; struct PlaceholderImpl : public Placeholder { - PlaceholderImpl(size_t size, void *input, std::type_index type, + PlaceholderImpl(size_t size, void *input, std::string type, cl_context context, cl_command_queue command_queue) : ptr_(clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, size, reinterpret_cast(input), NULL)), @@ -142,7 +142,7 @@ class CLTensor : TensorBase { context_(context), command_queue_(command_queue) {} - PlaceholderImpl(size_t size, std::type_index type, cl_context context, + PlaceholderImpl(size_t size, std::string type, cl_context context, cl_command_queue command_queue) : ptr_(clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, NULL)), size_(size), @@ -155,9 +155,9 @@ class CLTensor : TensorBase { virtual void *ptr() const { return static_cast(ptr_.get()); } - virtual std::type_index type() const { return type_; } + virtual std::string type() const { return type_; } - virtual void set_type(std::type_index type) { type_ = type; } + virtual void set_type(std::string type) { type_ = type; } virtual void resize(size_t size) { if (size > capatity_) { @@ -175,7 +175,7 @@ class CLTensor : TensorBase { size_t capatity_; /* the current type of memory */ - std::type_index type_; + std::string type_; cl_context context_; cl_command_queue command_queue_; diff --git a/src/framework/data_type.cpp b/src/framework/data_type.cpp index 0bcf7d9f67..94272a16b6 100644 --- a/src/framework/data_type.cpp +++ b/src/framework/data_type.cpp @@ -16,17 +16,18 @@ limitations under the License. 
*/ #include #include #include +#include "common/type_define.h" namespace paddle_mobile { namespace framework { struct DataTypeMap { - std::unordered_map cpp_to_proto_; - std::unordered_map proto_to_cpp_; + std::unordered_map proto_to_cpp_; std::unordered_map proto_to_str_; - std::unordered_map cpp_to_size_; + std::unordered_map cpp_to_size_; }; static DataTypeMap* InitDataTypeMap(); @@ -42,10 +43,10 @@ template static inline void RegisterType( DataTypeMap* map, _PaddleMobile__Framework__Proto__VarType__Type proto_type, const std::string& name) { - map->proto_to_cpp_.emplace(static_cast(proto_type), typeid(T)); - map->cpp_to_proto_.emplace(typeid(T), proto_type); + map->proto_to_cpp_.emplace(static_cast(proto_type), type_id().name()); + map->cpp_to_proto_.emplace(type_id().name(), proto_type); map->proto_to_str_.emplace(static_cast(proto_type), name); - map->cpp_to_size_.emplace(typeid(T), sizeof(T)); + map->cpp_to_size_.emplace(type_id().name(), sizeof(T)); } static DataTypeMap* InitDataTypeMap() { @@ -70,17 +71,15 @@ static DataTypeMap* InitDataTypeMap() { return retv; } -_PaddleMobile__Framework__Proto__VarType__Type ToDataType( - std::type_index type) { +_PaddleMobile__Framework__Proto__VarType__Type ToDataType(std::string type) { auto it = gDataTypeMap().cpp_to_proto_.find(type); if (it != gDataTypeMap().cpp_to_proto_.end()) { return it->second; } - PADDLE_MOBILE_THROW_EXCEPTION("Not support %s as tensor type", type.name()); + PADDLE_MOBILE_THROW_EXCEPTION("Not support %s as tensor type", type.c_str()); } -std::type_index ToTypeIndex( - _PaddleMobile__Framework__Proto__VarType__Type type) { +std::string ToTypeIndex(_PaddleMobile__Framework__Proto__VarType__Type type) { auto it = gDataTypeMap().proto_to_cpp_.find(static_cast(type)); if (it != gDataTypeMap().proto_to_cpp_.end()) { return it->second; diff --git a/src/framework/data_type.h b/src/framework/data_type.h index 231bb62e1f..ef7a19ab93 100644 --- a/src/framework/data_type.h +++ b/src/framework/data_type.h @@ -15,7 +15,6 @@ limitations under the License. */ #pragma once #include -#include #include "common/enforce.h" #include "framework/framework.pb-c.h" @@ -24,8 +23,8 @@ namespace paddle_mobile { namespace framework { extern _PaddleMobile__Framework__Proto__VarType__Type ToDataType( - std::type_index type); -extern std::type_index ToTypeIndex( + std::string type); +extern std::string ToTypeIndex( _PaddleMobile__Framework__Proto__VarType__Type type); inline _PaddleMobile__Framework__Proto__VarType__Type ToDataType(int type) { diff --git a/src/framework/ddim.h b/src/framework/ddim.h index 74dd288ba8..f35e162507 100644 --- a/src/framework/ddim.h +++ b/src/framework/ddim.h @@ -22,7 +22,7 @@ limitations under the License. 
*/ #include "common/enforce.h" #include "common/variant.h" -#include "dim.h" +#include "framework/dim.h" namespace paddle_mobile { namespace framework { @@ -40,25 +40,25 @@ struct DDim { template static typename Vistor::type_t ApplyVistor(Vistor vistor, const DDim &d) { - if (d.var.TypeId() == typeid(Dim<0>).hash_code()) { + if (d.var.TypeId() == type_id>().name()) { return vistor(d.var.Get>()); - } else if (d.var.TypeId() == typeid(Dim<1>).hash_code()) { + } else if (d.var.TypeId() == type_id>().name()) { return vistor(d.var.Get>()); - } else if (d.var.TypeId() == typeid(Dim<2>).hash_code()) { + } else if (d.var.TypeId() == type_id>().name()) { return vistor(d.var.Get>()); - } else if (d.var.TypeId() == typeid(Dim<3>).hash_code()) { + } else if (d.var.TypeId() == type_id>().name()) { return vistor(d.var.Get>()); - } else if (d.var.TypeId() == typeid(Dim<4>).hash_code()) { + } else if (d.var.TypeId() == type_id>().name()) { return vistor(d.var.Get>()); - } else if (d.var.TypeId() == typeid(Dim<5>).hash_code()) { + } else if (d.var.TypeId() == type_id>().name()) { return vistor(d.var.Get>()); - } else if (d.var.TypeId() == typeid(Dim<6>).hash_code()) { + } else if (d.var.TypeId() == type_id>().name()) { return vistor(d.var.Get>()); - } else if (d.var.TypeId() == typeid(Dim<7>).hash_code()) { + } else if (d.var.TypeId() == type_id>().name()) { return vistor(d.var.Get>()); - } else if (d.var.TypeId() == typeid(Dim<8>).hash_code()) { + } else if (d.var.TypeId() == type_id>().name()) { return vistor(d.var.Get>()); - } else if (d.var.TypeId() == typeid(Dim<9>).hash_code()) { + } else if (d.var.TypeId() == type_id>().name()) { return vistor(d.var.Get>()); } else { PADDLE_MOBILE_ENFORCE(false, " dim not support"); diff --git a/src/framework/executor.cpp b/src/framework/executor.cpp index b9f455bd27..f1c59c2edf 100644 --- a/src/framework/executor.cpp +++ b/src/framework/executor.cpp @@ -63,7 +63,7 @@ Executor::Executor(const Program &program, PADDLE_MOBILE_ENFORCE(program_desc_ != nullptr, "program_desc_ should not be nullptr"); #ifndef PADDLE_MOBILE_FPGA - pass::MemoryOptPass()(program_desc_.get(), program_.scope.get()); +// pass::MemoryOptPass()(program_desc_.get(), program_.scope.get()); #endif // resize feed and fetch list // should init feed and fetch variables before infer shape @@ -302,25 +302,9 @@ bool Executor::varInputMemory( const std::shared_ptr &var_desc, Variable *var) const { #ifdef PADDLE_MOBILE_FPGA framework::LoDTensor *tensor = var->template GetMutable(); - tensor->init(typeid(float)); + tensor->init(type_id()); return true; #endif - auto TypeId = [](const VarType_Type &type) -> std::type_index { - switch (type) { - case VARTYPE_TYPE_BOOL: - return typeid(bool); - case VARTYPE_TYPE_FP32: - return typeid(float); - case VARTYPE_TYPE_INT8: - return typeid(int8_t); - case VARTYPE_TYPE_INT32: - return typeid(int); - case VARTYPE_TYPE_INT64: - return typeid(int64_t); - default: - PADDLE_MOBILE_THROW_EXCEPTION("got unhandled var type `%d`", type); - } - }; auto type = var_desc->Type(); if (type == VARTYPE_TYPE_LOD_TENSOR) { @@ -390,13 +374,6 @@ void Executor::SetInput(const Tensor &input, framework::LoDTensor &target = feed_var->template GetMutable()->at(index); - if (config_.load_when_predict) { - if (input_dim_last_ != input.dims()) { - InitNoPersistableMemory(input); - input_dim_last_ = input.dims(); - } - } - target.Resize(input.dims()); target.ShareDataWith(input); } @@ -412,13 +389,6 @@ void Executor::SetInput(const LoDTensor &input, framework::LoDTensor &target = 
feed_var->template GetMutable()->at(index); - if (config_.load_when_predict) { - if (input_dim_last_ != input.dims()) { - InitNoPersistableMemory(input); - input_dim_last_ = input.dims(); - } - } - target.Resize(input.dims()); target.ShareDataWith(input); target.set_lod(input.lod()); diff --git a/src/framework/lod_tensor.cpp b/src/framework/lod_tensor.cpp index e165e55507..0a1a6f881d 100644 --- a/src/framework/lod_tensor.cpp +++ b/src/framework/lod_tensor.cpp @@ -12,52 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "lod_tensor.h" +#include "framework/lod_tensor.h" #include namespace paddle_mobile { namespace framework { -// std::ostream &operator<<(std::ostream &os, const LoD &lod) { -// os << "{"; -// for (auto &v : lod) { -// os << "{"; -// bool is_first = true; -// for (auto &i : v) { -// if (is_first) { -// os << i; -// is_first = false; -// } else { -// os << ", " << i; -// } -// } -// os << "}"; -// } -// os << "}"; -// -// return os; -//} -// -// std::ostream &operator<<(std::ostream &os, const LoDTensor &t) { -// PADDLE_MOBILE_ENFORCE(t.type().hash_code() == typeid(float).hash_code(), -// "t.type() is not float"); -// os << "dim: " << t.dims() << "\n"; -// os << "lod: " << t.lod() << "\n"; -// // only print first ten elements -// int64_t size = t.numel() < 10 ? t.numel() : 10; -// for (int64_t i = 0; i < size; ++i) { -// os << t.data()[i] << " "; -// } -// -// return os; -//} - -// std::string LoDToString(const LoD &lod) { -// std::ostringstream stream; -// stream << lod; -// return stream.str(); -//} - LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin, size_t elem_end) { PADDLE_MOBILE_ENFORCE(level < in.size(), "level should >= in.size()"); diff --git a/src/framework/lod_tensor.h b/src/framework/lod_tensor.h index e96fe0e501..6d67b517ff 100644 --- a/src/framework/lod_tensor.h +++ b/src/framework/lod_tensor.h @@ -211,17 +211,17 @@ inline Print &operator<<(Print &printer, const LoDTensor &tensor) { stride = stride > 0 ? 
stride : 1; #ifndef PADDLE_MOBILE_FPGA for (int i = 0; i < tensor.numel(); i += stride) { - if (tensor.type() == typeid(float)) { + if (tensor.type() == type_id()) { printer << tensor.data()[i] << " "; - } else if (tensor.type() == typeid(int32_t)) { + } else if (tensor.type() == type_id()) { printer << tensor.data()[i] << " "; - } else if (tensor.type() == typeid(int64_t)) { + } else if (tensor.type() == type_id()) { printer << tensor.data()[i] << " "; - } else if (tensor.type() == typeid(int8_t)) { + } else if (tensor.type() == type_id()) { printer << static_cast(tensor.data()[i]) << " "; - } else if (tensor.type() == typeid(int32_t)) { + } else if (tensor.type() == type_id()) { printer << tensor.data()[i] << " "; - } else if (tensor.type() == typeid(bool)) { + } else if (tensor.type() == type_id()) { printer << tensor.data()[i] << " "; } } diff --git a/src/framework/mixed_vector.h b/src/framework/mixed_vector.h index 031d73179c..bae96e620c 100644 --- a/src/framework/mixed_vector.h +++ b/src/framework/mixed_vector.h @@ -17,7 +17,6 @@ #include #include #include - #include "framework/tensor.h" #include "framework/tensor_util.h" @@ -198,7 +197,7 @@ class Vector { } size_t capacity() const { - return cpu_vec_.memory_size() / SizeOfType(typeid(T)); + return cpu_vec_.memory_size() / SizeOfType(type_id().name()); } // reserve data diff --git a/src/framework/op_info.h b/src/framework/op_info.h index 16b3487955..c250f61664 100644 --- a/src/framework/op_info.h +++ b/src/framework/op_info.h @@ -14,13 +14,24 @@ limitations under the License. */ #pragma once +#include #include #include "common/log.h" #include "common/type_define.h" +#include "framework/scope.h" namespace paddle_mobile { namespace framework { +template +class OperatorBase; + +template +using OpCreator = std::function *( + const std::string & /*type*/, const VariableNameMap & /*inputs*/, + const VariableNameMap & /*outputs*/, + const framework::AttributeMap & /*attrs*/, framework::Scope * /*scope*/)>; + template struct OpInfo { OpCreator creator_; @@ -79,8 +90,6 @@ class OpInfoMap { private: OpInfoMap() = default; std::unordered_map> map_; - - // DISABLE_COPY_AND_ASSIGN(OpInfoMap); }; } // namespace framework diff --git a/src/framework/operator.h b/src/framework/operator.h index 93f23e9d10..c8b3a5ccf7 100644 --- a/src/framework/operator.h +++ b/src/framework/operator.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include #include diff --git a/src/framework/program/op_desc.h b/src/framework/program/op_desc.h index f9579df034..89c877ba12 100644 --- a/src/framework/program/op_desc.h +++ b/src/framework/program/op_desc.h @@ -18,7 +18,8 @@ limitations under the License. 
*/ #include #include "common/log.h" -#include "common/type_define.h" +#include "common/types.h" +#include "framework/attribute.h" #include "framework/framework.pb-c.h" namespace paddle_mobile { diff --git a/src/framework/program/program_desc.cpp b/src/framework/program/program_desc.cpp index 23781fe779..88cac11d28 100644 --- a/src/framework/program/program_desc.cpp +++ b/src/framework/program/program_desc.cpp @@ -72,7 +72,7 @@ void ProgramDesc::Description(std::string header) const { } } for (auto &attr : op->GetAttrMap()) { - if (attr.first == "op_callstack") continue; + if (attr.first == "op_callstack" || attr.first == "sub_block") continue; LOG(kLOG_DEBUG2) << "attr name: " << attr.first; LOG(kLOG_DEBUG3) << "argument - " << attr.second; } diff --git a/src/framework/tensor.h b/src/framework/tensor.h index 4fb06c6549..c38199b9e2 100644 --- a/src/framework/tensor.h +++ b/src/framework/tensor.h @@ -19,8 +19,6 @@ limitations under the License. */ #include #include #include -#include -#include #include #include "common/enforce.h" @@ -83,7 +81,7 @@ class Tensor : public TensorBase { return *this; } - inline void *mutable_data(std::type_index type) { + inline void *mutable_data(const std::string type) { if (holder_ != nullptr) { holder_->set_type(type); } @@ -108,7 +106,7 @@ class Tensor : public TensorBase { template inline T *mutable_data() { static_assert(std::is_pod::value, "T must be POD"); - return reinterpret_cast(mutable_data(typeid(T))); + return reinterpret_cast(mutable_data(type_id().name())); } /** @@ -165,9 +163,9 @@ class Tensor : public TensorBase { check_memory_size(); PADDLE_MOBILE_ENFORCE( (std::is_same::value || - holder_->type().hash_code() == typeid(T).hash_code()), + holder_->type() == type_id().name()), "Tensor holds the wrong type, it holds %s, requested %s", - this->holder_->type().name(), typeid(T).name()); + this->holder_->type().c_str(), type_id().name().c_str()); return reinterpret_cast(reinterpret_cast(holder_->ptr()) + offset_); @@ -179,9 +177,9 @@ class Tensor : public TensorBase { check_memory_size(); PADDLE_MOBILE_ENFORCE( (std::is_same::value || - holder_->type().hash_code() == typeid(T).hash_code()), + holder_->type() == type_id().name()), "Tensor holds the wrong type, it holds %s, requested %s", - this->holder_->type().name(), typeid(T).name()); + this->holder_->type().c_str(), type_id().name().c_str()); return reinterpret_cast( reinterpret_cast(holder_->ptr()) + offset_); @@ -189,7 +187,7 @@ class Tensor : public TensorBase { private: struct PlaceholderImpl : public Placeholder { - PlaceholderImpl(size_t size, std::type_index type) + PlaceholderImpl(size_t size, const std::string type) : ptr_(static_cast(memory::Alloc(size)), memory::PODDeleter()), size_(size), @@ -203,9 +201,9 @@ class Tensor : public TensorBase { virtual void *ptr() const { return static_cast(ptr_.get()); } - virtual std::type_index type() const { return type_; } + virtual std::string type() const { return type_; } - virtual void set_type(std::type_index type) { type_ = type; } + virtual void set_type(const std::string type) { type_ = type; } virtual void resize(size_t size) { if (size > capatity_) { @@ -223,7 +221,7 @@ class Tensor : public TensorBase { size_t capatity_; /* the current type of memory */ - std::type_index type_; + std::string type_; }; #ifdef PADDLE_MOBILE_FPGA @@ -231,13 +229,13 @@ class Tensor : public TensorBase { inline void reset_data_ptr(void *p) { ((PlaceholderImpl *)(holder_.get()))->ptr_.reset((uint8_t *)p); // NOLINT } - inline void set_type(std::type_index 
type) { holder_->set_type(type); } + inline void set_type(const std::string type) { holder_->set_type(type); } inline void *get_data() { return ( void *)(((PlaceholderImpl *)(holder_.get()))->ptr_.get()); // NOLINT } - inline void *init(std::type_index type) { + inline void *init(const std::string type) { if (holder_ != nullptr) { holder_->set_type(type); } @@ -265,15 +263,15 @@ inline Print &operator<<(Print &printer, const Tensor &tensor) { stride = stride > 0 ? stride : 1; #ifndef PADDLE_MOBILE_FPGA for (int i = 0; i < tensor.numel(); i += stride) { - if (tensor.type() == typeid(float)) { + if (tensor.type() == type_id().name()) { printer << tensor.data()[i] << " "; - } else if (tensor.type() == typeid(int32_t)) { + } else if (tensor.type() == type_id().name()) { printer << tensor.data()[i] << " "; - } else if (tensor.type() == typeid(int64_t)) { + } else if (tensor.type() == type_id().name()) { printer << tensor.data()[i] << " "; - } else if (tensor.type() == typeid(int8_t)) { + } else if (tensor.type() == type_id().name()) { printer << static_cast(tensor.data()[i]) << " "; - } else if (tensor.type() == typeid(int32_t)) { + } else if (tensor.type() == type_id().name()) { printer << tensor.data()[i] << " "; } } diff --git a/src/framework/tensor_base.h b/src/framework/tensor_base.h index e5ab7793c0..7d76c0eff2 100644 --- a/src/framework/tensor_base.h +++ b/src/framework/tensor_base.h @@ -14,9 +14,7 @@ limitations under the License. */ #pragma once -#include -#include - +#include #include "common/enforce.h" #include "common/types.h" #include "framework/ddim.h" @@ -29,8 +27,8 @@ struct SizeOfTypeFunctor; template struct SizeOfTypeFunctor { - size_t operator()(std::type_index type) const { - if (typeid(T).hash_code() == type.hash_code()) { + size_t operator()(const std::string type) const { + if (type_id().name() == type) { return sizeof(T); } else { return 0UL; @@ -40,12 +38,12 @@ struct SizeOfTypeFunctor { template <> struct SizeOfTypeFunctor<> { - size_t operator()(std::type_index type) const { return 0UL; } + size_t operator()(const std::string type) const { return 0UL; } }; template struct SizeOfTypeFunctor { - size_t operator()(std::type_index type) const { + size_t operator()(const std::string type) const { SizeOfTypeFunctor head; size_t head_size = head(type); if (head_size != 0) { @@ -56,13 +54,14 @@ struct SizeOfTypeFunctor { } }; -static inline size_t SizeOfType(std::type_index type) { +static inline size_t SizeOfType(std::string type) { SizeOfTypeFunctor functor; size_t size = functor(type); - PADDLE_MOBILE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name()); + PADDLE_MOBILE_ENFORCE(size != 0UL, "Cannot get size of type %s", + type.c_str()); return size; } @@ -78,7 +77,7 @@ class TensorBase { /*! Return the numel of the memory block. */ inline int64_t numel() const { return product(dims_); } - std::type_index type() const { + std::string type() const { PADDLE_MOBILE_ENFORCE( holder_ != nullptr, "Tensor not initialized yet when Tensor::type() is called.") @@ -114,9 +113,9 @@ class TensorBase { virtual size_t size() const = 0; - virtual std::type_index type() const = 0; + virtual std::string type() const = 0; - virtual void set_type(std::type_index type) = 0; + virtual void set_type(std::string type) = 0; virtual void resize(size_t size) = 0; }; diff --git a/src/framework/variable.h b/src/framework/variable.h index 5bff63f068..51997530e5 100644 --- a/src/framework/variable.h +++ b/src/framework/variable.h @@ -16,13 +16,10 @@ limitations under the License. 
*/ #include #include -#include -#include -#include "../common/variant.h" +#include "common/variant.h" namespace paddle_mobile { namespace framework { -using std::string; class Variable { public: @@ -33,7 +30,7 @@ class Variable { template const T GetValue() const { - if (typeid(T) == typeid(std::string)) { + if (type_id().name() == type_id().name()) { PADDLE_MOBILE_THROW_EXCEPTION( "Please use getString to get an string (to avoid of an issue with " "gcc " @@ -60,38 +57,39 @@ class Variable { template bool IsType() const { - return holder_ != nullptr && holder_->Type() == typeid(T); + return holder_ != nullptr && holder_->Type() == type_id().name(); } void Clear() { holder_.reset(); } - std::type_index Type() const { return holder_->Type(); } + std::string Type() const { return holder_->Type(); } private: struct Placeholder { Placeholder() = default; virtual ~Placeholder() = default; - virtual const std::type_info &Type() const = 0; + virtual std::string Type() const = 0; virtual void *Ptr() const = 0; }; template struct PlaceholderImp : public Placeholder { - explicit PlaceholderImp(T *ptr) : ptr_(ptr), type_(typeid(T)) {} + explicit PlaceholderImp(T *ptr) : ptr_(ptr), type_(type_id().name()) {} - virtual const std::type_info &Type() const { return type_; } - virtual void *Ptr() const override { - return static_cast(ptr_.get()); - } + std::string Type() const override { return type_; } + void *Ptr() const override { return static_cast(ptr_.get()); } std::unique_ptr ptr_; - const std::type_info &type_; + std::string type_; }; - Variant variant; - std::unique_ptr holder_; + friend class Scope; - string name_; + + Variant variant; + std::unique_ptr holder_; + std::string name_; }; + } // namespace framework } // namespace paddle_mobile diff --git a/src/io/api_paddle_mobile.cc b/src/io/api_paddle_mobile.cc index 1f4769b282..3976ac26cc 100644 --- a/src/io/api_paddle_mobile.cc +++ b/src/io/api_paddle_mobile.cc @@ -128,7 +128,7 @@ void ConvertTensors(const framework::Tensor &src, PaddleTensor *des) { des->layout = src.layout == framework::LAYOUT_HWC ? LAYOUT_HWC : LAYOUT_CHW; auto num = src.numel(); - if (src.type() == typeid(float)) { + if (src.type() == type_id()) { des->data.Reset(const_cast(src.data()), num * sizeof(float)); } else { @@ -143,7 +143,7 @@ void PaddleMobilePredictor::FeedPaddleTensors( auto num = inputs.size(); std::vector tensors(num, framework::Tensor()); for (int i = 0; i < num; i++) { - tensors[i].init(typeid(float)); + tensors[i].init(type_id()); ConvertPaddleTensors(inputs[i], &tensors[i]); } paddle_mobile_->FeedTensorData(tensors); diff --git a/src/io/paddle_inference_api.h b/src/io/paddle_inference_api.h index e8c2c1daaa..30ac5ef8f0 100644 --- a/src/io/paddle_inference_api.h +++ b/src/io/paddle_inference_api.h @@ -24,7 +24,6 @@ limitations under the License. */ #include #include #include -#include #include namespace paddle_mobile { @@ -88,7 +87,6 @@ struct PaddleTensor { // TODO(Superjomn) for LoD support, add a vector> field if needed. PaddleBuf data; // blob of data. PaddleDType dtype; - std::type_index dtypeid = typeid(float); LayoutType layout; }; diff --git a/src/operators/beam_search_decode_op.cpp b/src/operators/beam_search_decode_op.cpp index 9b01d2e17f..1038234fe8 100644 --- a/src/operators/beam_search_decode_op.cpp +++ b/src/operators/beam_search_decode_op.cpp @@ -14,8 +14,6 @@ limitations under the License. 
*/ #ifdef BEAM_SEARCH_DECODE_OP -#pragma once - #include "operators/beam_search_decode_op.h" namespace paddle_mobile { diff --git a/src/operators/beam_search_op.cpp b/src/operators/beam_search_op.cpp index 502510ebee..5f83e53667 100644 --- a/src/operators/beam_search_op.cpp +++ b/src/operators/beam_search_op.cpp @@ -14,8 +14,6 @@ limitations under the License. */ #ifdef BEAM_SEARCH_OP -#pragma once - #include "operators/beam_search_op.h" namespace paddle_mobile { diff --git a/src/operators/kernel/arm/compare_kernel.cpp b/src/operators/kernel/arm/compare_kernel.cpp index d83fae1748..e1a0f6f167 100644 --- a/src/operators/kernel/arm/compare_kernel.cpp +++ b/src/operators/kernel/arm/compare_kernel.cpp @@ -192,10 +192,10 @@ bool LessThanKernel::Init(CompareParam *param) { template <> void LessThanKernel::Compute(const CompareParam ¶m) { - if (param.input_x_->type() == typeid(int64_t)) { + if (param.input_x_->type() == type_id().name()) { CompareCompute()(param.input_x_, param.input_y_, param.axis_, param.output_); - } else if (param.input_x_->type() == typeid(float)) { + } else if (param.input_x_->type() == type_id().name()) { CompareCompute()(param.input_x_, param.input_y_, param.axis_, param.output_); } else { diff --git a/src/operators/kernel/arm/concat_kernel.cpp b/src/operators/kernel/arm/concat_kernel.cpp index 3c6a6f151f..efee9cff28 100644 --- a/src/operators/kernel/arm/concat_kernel.cpp +++ b/src/operators/kernel/arm/concat_kernel.cpp @@ -27,7 +27,7 @@ bool ConcatKernel::Init(ConcatParam *param) { template <> void ConcatKernel::Compute(const ConcatParam ¶m) { - if (param.Inputs()[0]->type() == typeid(int8_t)) { + if (param.Inputs()[0]->type() == type_id().name()) { ConcatCompute(param); } else { ConcatCompute(param); diff --git a/src/operators/kernel/arm/convolution/conv_common.cpp b/src/operators/kernel/arm/convolution/conv_common.cpp index 86c6e8d337..d96eef35c8 100644 --- a/src/operators/kernel/arm/convolution/conv_common.cpp +++ b/src/operators/kernel/arm/convolution/conv_common.cpp @@ -28,7 +28,7 @@ void InitBaseConvKernel(ConvParam *param) { bool depth5x5 = conv5x5 && param->Groups() == param->Input()->dims()[1] && param->Input()->dims()[1] == param->Output()->dims()[1]; - if (param->Filter()->type() == typeid(int8_t)) { + if (param->Filter()->type() == type_id().name()) { #ifndef __aarch64__ if (depth3x3 && param->Strides()[0] < 3 && param->Strides()[0] == param->Strides()[1]) { diff --git a/src/operators/kernel/arm/tensor_array_read_write_kernel.cpp b/src/operators/kernel/arm/tensor_array_read_write_kernel.cpp index 72fbb4cadb..bdf10574a8 100644 --- a/src/operators/kernel/arm/tensor_array_read_write_kernel.cpp +++ b/src/operators/kernel/arm/tensor_array_read_write_kernel.cpp @@ -28,7 +28,9 @@ void WriteToArrayKernel::Compute( const WriteToArrayParam ¶m) { int64_t offset = param.index_->data()[0]; if (offset >= param.output_->size()) { - param.output_->resize(offset + 1); + while (param.output_->size() <= offset) { + param.output_->emplace_back(); + } } framework::LoDTensor *out_tensor = &(param.output_->at(offset)); diff --git a/src/operators/kernel/arm/transpose2_kernel.cpp b/src/operators/kernel/arm/transpose2_kernel.cpp index 6928df71e6..54f759f016 100644 --- a/src/operators/kernel/arm/transpose2_kernel.cpp +++ b/src/operators/kernel/arm/transpose2_kernel.cpp @@ -126,13 +126,13 @@ void Transpose2Kernel::Compute(const Transpose2Param ¶m) { const std::vector &axis = param.Axis(); bool shuffle_channel = IsShuffleChannel(axis); if (shuffle_channel) { - if (param.InputX()->type() 
== typeid(int8_t)) { + if (param.InputX()->type() == type_id().name()) { ShuffleChannelCompute(param); } else { ShuffleChannelCompute(param); } } else { - if (param.InputX()->type() == typeid(int8_t)) { + if (param.InputX()->type() == type_id().name()) { Transpose2Compute(param); } else { Transpose2Compute(param); diff --git a/src/operators/kernel/arm/while_kernel.cpp b/src/operators/kernel/arm/while_kernel.cpp index 63cd150ec9..4794fd122c 100644 --- a/src/operators/kernel/arm/while_kernel.cpp +++ b/src/operators/kernel/arm/while_kernel.cpp @@ -35,6 +35,7 @@ class StepExecutor { auto op_handler = framework::OpRegistry::CreateOp( op_desc->Type(), op_desc->GetInputs(), op_desc->GetOutputs(), op_desc->GetAttrMap(), scope_); + op_handler->Init(); ops_of_block_[i] = op_handler; } } diff --git a/src/operators/kernel/central-arm-func/mul_arm_func.h b/src/operators/kernel/central-arm-func/mul_arm_func.h index 1f22ab9898..4b697c0d13 100644 --- a/src/operators/kernel/central-arm-func/mul_arm_func.h +++ b/src/operators/kernel/central-arm-func/mul_arm_func.h @@ -37,7 +37,7 @@ void MulCompute(const MulParam ¶m) { if (out_dim.size() != 2) { out->Resize({x_matrix.dims()[0], y_matrix.dims()[1]}); } - if (param.InputX()->type() == typeid(int8_t)) { + if (param.InputX()->type() == type_id().name()) { out->mutable_data(); math::MatMul(x_matrix, false, y_matrix, false, static_cast(1), out, diff --git a/src/operators/kernel/central-arm-func/sum_arm_func.h b/src/operators/kernel/central-arm-func/sum_arm_func.h index 36c7ac9694..eb1e830849 100644 --- a/src/operators/kernel/central-arm-func/sum_arm_func.h +++ b/src/operators/kernel/central-arm-func/sum_arm_func.h @@ -144,7 +144,8 @@ void SumCompute(const SumParam ¶m) { } } else { PADDLE_MOBILE_THROW_EXCEPTION( - "Unexpected branch, output variable type is %s", outvar->Type().name()); + "Unexpected branch, output variable type is %s", + outvar->Type().c_str()); } } } // namespace operators diff --git a/src/operators/math/gemm/cblas.cc b/src/operators/math/gemm/cblas.cc index 058b61f111..4428826552 100644 --- a/src/operators/math/gemm/cblas.cc +++ b/src/operators/math/gemm/cblas.cc @@ -14,8 +14,6 @@ limitations under the License. */ #if defined(__ARM_NEON__) || defined(__ARM_NEON) -#pragma once - #include "operators/math/gemm/cblas.h" #include "operators/math/gemm/executor.h" #include "operators/math/gemm/strategy.h" diff --git a/src/operators/one_hot_op.cpp b/src/operators/one_hot_op.cpp index 396f55318a..4367c2161e 100644 --- a/src/operators/one_hot_op.cpp +++ b/src/operators/one_hot_op.cpp @@ -14,8 +14,6 @@ limitations under the License. */ #ifdef ONE_HOT_OP -#pragma once - #include "operators/one_hot_op.h" namespace paddle_mobile { diff --git a/src/operators/op_param.h b/src/operators/op_param.h index 8679174e4c..e144bf623f 100644 --- a/src/operators/op_param.h +++ b/src/operators/op_param.h @@ -19,6 +19,7 @@ limitations under the License. */ #include "common/log.h" #include "common/type_define.h" #include "common/types.h" +#include "framework/attribute.h" #include "framework/lod_tensor.h" #include "framework/scope.h" #include "framework/tensor.h" -- GitLab
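
For reference, here is a minimal, self-contained sketch (not part of the patch) of
the registration pattern this commit introduces. The names TypeIdWrapper, type_id,
and REGISTER_TYPE_ID mirror the diff; the const qualifiers on name() and operator==
are added here so the standalone snippet compiles, and main() is purely illustrative:

#include <iostream>
#include <string>

// typeid(T).name() and typeid(T).hash_code() are implementation-defined, so
// they can differ between compilers and standard libraries. Registering an
// explicit string per supported type gives every build the same identifier.
template <typename T>
struct TypeIdWrapper {
  std::string type();  // left undefined: using an unregistered type fails to link
};

template <typename T>
struct type_id {
  const std::string type_ = TypeIdWrapper<T>().type();

  template <typename OtherType>
  bool operator==(const type_id<OtherType> &other) const {
    return name() == other.name();
  }

  const std::string name() const { return type_; }
};

#define REGISTER_TYPE_ID(Type, TypeName)                  \
  template <>                                             \
  struct TypeIdWrapper<Type> {                            \
    std::string type() { return std::string(#TypeName); } \
  };

REGISTER_TYPE_ID(float, _float)
REGISTER_TYPE_ID(int, _int)

int main() {
  std::cout << type_id<float>().name() << std::endl;             // prints "_float"
  std::cout << (type_id<int>() == type_id<float>()) << std::endl;  // prints "0"
  return 0;
}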