diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc
index d29d8ce1c561e45980d10c17c984ca2ed3b453f3..2d0611326be2fb2ddfef5cea0c11d2f935d20888 100644
--- a/paddle/fluid/framework/lod_tensor.cc
+++ b/paddle/fluid/framework/lod_tensor.cc
@@ -20,6 +20,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/framework/framework.pb.h"
 #include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/var_type.h"
 
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/memory/memory.h"
@@ -68,9 +69,9 @@ std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
   // only print first ten elements
   int64_t size = t.numel() < 10 ? t.numel() : 10;
   for (int64_t i = 0; i < size; ++i) {
-    if (t.type().hash_code() == typeid(float).hash_code()) {
+    if (IsType<float>(t.type())) {
       os << t.data<float>()[i] << " ";
-    } else if (t.type().hash_code() == typeid(int64_t).hash_code()) {
+    } else if (IsType<int64_t>(t.type())) {
       os << t.data<int64_t>()[i] << " ";
     } else {
       PADDLE_THROW("LoDTensor data type not in [float, int64_t]");
@@ -384,7 +385,7 @@ void LoDTensor::MergeLoDTensor(
   LoD new_lod = lod_tensors[0]->lod();
   for (size_t i = 1; i < lod_tensors.size(); ++i) {
     auto *t = lod_tensors[i];
-    PADDLE_ENFORCE_EQ(new_type.hash_code(), t->type().hash_code());
+    PADDLE_ENFORCE_EQ(new_type, t->type());
     PADDLE_ENFORCE_EQ(new_layout, t->layout());
 
     PADDLE_ENFORCE_EQ(framework::product(new_dim) / new_dim[0],
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index c1329b06d7e9bcd6604fed14cefa305339c5c4b8..4fb2bb45fed6415cdaddb0166e2e2f2819cafeb0 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -592,8 +592,7 @@ static void CheckTensorNANOrInf(const std::string& name,
   if (tensor.memory_size() == 0) {
     return;
   }
-  if (tensor.type().hash_code() != typeid(float).hash_code() &&   // NOLINT
-      tensor.type().hash_code() != typeid(double).hash_code()) {  // NOLINT
+  if (!IsType<float>(tensor.type()) && !IsType<double>(tensor.type())) {
     return;
   }
   PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
diff --git a/paddle/fluid/framework/var_type.h b/paddle/fluid/framework/var_type.h
index 2b646d78f0b23ec3e065c891826856c2341d4ac1..429997c8b89fef7aa164e878095ab3b5c9998e5b 100644
--- a/paddle/fluid/framework/var_type.h
+++ b/paddle/fluid/framework/var_type.h
@@ -24,18 +24,24 @@ limitations under the License. */
 
 namespace paddle {
 namespace framework {
+
+template <typename T>
+bool IsType(const std::type_index& type_index) {
+  return type_index == std::type_index(typeid(T));
+}
+
 inline proto::VarType::Type ToVarType(std::type_index type) {
-  if (type.hash_code() == typeid(LoDTensor).hash_code()) {
+  if (IsType<LoDTensor>(type)) {
     return proto::VarType_Type_LOD_TENSOR;
-  } else if (type.hash_code() == typeid(LoDRankTable).hash_code()) {
+  } else if (IsType<LoDRankTable>(type)) {
     return proto::VarType_Type_LOD_RANK_TABLE;
-  } else if (type.hash_code() == typeid(LoDTensorArray).hash_code()) {
+  } else if (IsType<LoDTensorArray>(type)) {
     return proto::VarType_Type_LOD_TENSOR_ARRAY;
-  } else if (type.hash_code() == typeid(SelectedRows).hash_code()) {
+  } else if (IsType<SelectedRows>(type)) {
     return proto::VarType_Type_SELECTED_ROWS;
-  } else if (type.hash_code() == typeid(ReaderHolder).hash_code()) {
+  } else if (IsType<ReaderHolder>(type)) {
     return proto::VarType_Type_READER;
-  } else if (type.hash_code() == typeid(ChannelHolder).hash_code()) {
+  } else if (IsType<ChannelHolder>(type)) {
     return proto::VarType_Type_CHANNEL;
   } else {
     PADDLE_THROW("ToVarType:Unsupported type %s", type.name());
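Note: the IsType<T> helper introduced in var_type.h above is self-contained, so its behavior is easy to check outside Paddle. A minimal standalone sketch (any C++11 compiler; the main() here is illustrative only, not part of the patch):

    #include <cassert>
    #include <typeindex>
    #include <typeinfo>

    // Same shape as the helper added in var_type.h: std::type_index wraps a
    // std::type_info and provides operator==, so call sites compare types
    // directly instead of comparing hash_code() values.
    template <typename T>
    bool IsType(const std::type_index& type_index) {
      return type_index == std::type_index(typeid(T));
    }

    int main() {
      std::type_index idx(typeid(float));
      assert(IsType<float>(idx));    // exact type matches
      assert(!IsType<double>(idx));  // a different type does not
      return 0;
    }

The practical difference from the old checks: std::type_index::operator== is defined in terms of std::type_info::operator==, an exact identity test, whereas hash_code() equality only guarantees that identical types hash equal; distinct types are permitted to collide.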
diff --git a/paddle/fluid/inference/analysis/helper.h b/paddle/fluid/inference/analysis/helper.h
index fff1621d3f1bb31cfa04110d1f3cf5dbfe927331..f1064cd20f28092d80d3fd23a862da080b6cc2f3 100644
--- a/paddle/fluid/inference/analysis/helper.h
+++ b/paddle/fluid/inference/analysis/helper.h
@@ -16,6 +16,7 @@ limitations under the License. */
 
 #include <cstdio>
 #include <string>
+#include <typeindex>
 #include <unordered_map>
 #include <vector>
 
@@ -41,7 +42,7 @@ int AccuDims(Vec &&vec, int size) {
   return res;
 }
 
-#define SET_TYPE(type__) dic_[typeid(type__).hash_code()] = #type__;
+#define SET_TYPE(type__) dic_[std::type_index(typeid(type__))] = #type__;
 /*
  * Map typeid to representation.
  */
@@ -53,14 +54,14 @@ struct DataTypeNamer {
 
   template <typename T>
   const std::string &repr() const {
-    auto x = typeid(T).hash_code();
+    auto x = std::type_index(typeid(T));
     PADDLE_ENFORCE(dic_.count(x), "unknown type for representation");
     return dic_.at(x);
   }
 
-  const std::string &repr(size_t &hash) const {  // NOLINT
-    PADDLE_ENFORCE(dic_.count(hash), "unknown type for representation");
-    return dic_.at(hash);
+  const std::string &repr(const std::type_index &type) const {  // NOLINT
+    PADDLE_ENFORCE(dic_.count(type), "unknown type for representation");
+    return dic_.at(type);
   }
 
  private:
@@ -71,9 +72,7 @@ struct DataTypeNamer {
     SET_TYPE(void *);
   }
 
-  std::unordered_map<decltype(typeid(int).hash_code()),  // NOLINT
-                     std::string>
-      dic_;
+  std::unordered_map<std::type_index, std::string> dic_;
 };
 #undef SET_TYPE
diff --git a/paddle/fluid/inference/analysis/node.cc b/paddle/fluid/inference/analysis/node.cc
index d9d265d225bb77a3f5f83cbd0b8b1c670fb34a31..f2e918f3ff41d9db0c3ec38561015967bed26f4e 100644
--- a/paddle/fluid/inference/analysis/node.cc
+++ b/paddle/fluid/inference/analysis/node.cc
@@ -23,9 +23,9 @@ namespace analysis {
 template <>
 std::string &NodeAttr::As<std::string>() {
   if (data_.empty()) {
-    type_hash_ = typeid(std::string).hash_code();
+    type_index_ = std::type_index(typeid(std::string));
   }
-  PADDLE_ENFORCE_EQ(type_hash_, typeid(std::string).hash_code());
+  PADDLE_ENFORCE_EQ(type_index_, std::type_index(typeid(std::string)));
   return data_;
 }
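Note: the DataTypeNamer change above also swaps the map key from size_t to std::type_index. No custom hasher is required because <typeindex> provides a std::hash<std::type_index> specialization (since C++11). A hypothetical standalone sketch of the same keying pattern:

    #include <iostream>
    #include <string>
    #include <typeindex>
    #include <unordered_map>

    int main() {
      // std::hash<std::type_index> comes with the standard library, so
      // std::type_index can key an unordered_map directly.
      std::unordered_map<std::type_index, std::string> dic;
      dic[std::type_index(typeid(int))] = "int";
      dic[std::type_index(typeid(float))] = "float";
      std::cout << dic.at(std::type_index(typeid(float))) << "\n";  // float
      return 0;
    }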
diff --git a/paddle/fluid/inference/analysis/node.h b/paddle/fluid/inference/analysis/node.h
index 8ecd1ae730e6ec6775f4a22fdc5dec0e8ca8e2d1..47e524bc5c4a6b1324d5f182053129311487522d 100644
--- a/paddle/fluid/inference/analysis/node.h
+++ b/paddle/fluid/inference/analysis/node.h
@@ -25,6 +25,7 @@ limitations under the License. */
 #include <unordered_map>
 #include <vector>
 
+#include "paddle/fluid/framework/var_type.h"
 #include "paddle/fluid/inference/analysis/device.h"
 #include "paddle/fluid/inference/analysis/dot.h"
 #include "paddle/fluid/inference/analysis/helper.h"
@@ -57,12 +58,12 @@ struct NodeAttr {
     // init storage in the first usage.
     if (data_.empty()) {
       VLOG(4) << "resize data to " << sizeof(T);
-      type_hash_ = typeid(T).hash_code();
+      type_index_ = std::type_index(typeid(T));
       data_.resize(sizeof(T));
     }
-    PADDLE_ENFORCE(type_hash_ == typeid(T).hash_code(),
+    PADDLE_ENFORCE(framework::IsType<T>(type_index_),
                    "type not matched, origin is %s, want %s",
-                   DataTypeNamer::Global().repr(type_hash_),
+                   DataTypeNamer::Global().repr(type_index_),
                    DataTypeNamer::Global().repr<T>());
     PADDLE_ENFORCE_EQ(data_.size(), sizeof(T), "Node attr type recast error");
     return *reinterpret_cast<T *>(&data_[0]);
@@ -70,7 +71,7 @@ struct NodeAttr {
 
  private:
   std::string data_;
-  size_t type_hash_{std::numeric_limits<size_t>::max()};
+  std::type_index type_index_{typeid(NodeAttr)};
 };
 
 /*
diff --git a/paddle/fluid/operators/conditional_block_op.cc b/paddle/fluid/operators/conditional_block_op.cc
index 5984f80d04bdeb232f8e24264ae979725af24ef4..8cc1d94260baccfe28d213b7e021956819e2e79e 100644
--- a/paddle/fluid/operators/conditional_block_op.cc
+++ b/paddle/fluid/operators/conditional_block_op.cc
@@ -14,6 +14,7 @@ limitations under the License. */
 #include <algorithm>
 #include "paddle/fluid/framework/executor.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/var_type.h"
 
 namespace paddle {
 namespace operators {
@@ -47,7 +48,7 @@ class ConditionalOp : public framework::OperatorBase {
     if (!(ips.size() == 1UL && ips[0]->IsInitialized())) {
       PADDLE_THROW("should have one initialized input as condition");
     }
-    if (!(ips[0]->type().hash_code() == typeid(bool).hash_code() &&  // NOLINT
+    if (!(framework::IsType<bool>(ips[0]->type()) &&  // NOLINT
           ips[0]->numel() == 1)) {
       PADDLE_THROW(
           "condition input's data type should be bool, "
diff --git a/paddle/fluid/operators/print_op.cc b/paddle/fluid/operators/print_op.cc
index db7634918a5179a61304315ecd08350d23fb4642..cceac402951ae6bf3fe0b4c96af5b7ce9ca1ba0e 100644
--- a/paddle/fluid/operators/print_op.cc
+++ b/paddle/fluid/operators/print_op.cc
@@ -16,6 +16,7 @@
 #include <ctime>
 
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/var_type.h"
 #include "paddle/fluid/framework/variable.h"
 
 namespace paddle {
@@ -62,7 +63,7 @@ struct Formater {
     }
   }
   void PrintDtype() {
-    if (dtype.hash_code() != typeid(const char).hash_code()) {
+    if (!framework::IsType<const char>(dtype)) {
       CLOG << "\tdtype: " << dtype.name() << std::endl;
     }
   }
@@ -83,15 +84,15 @@ struct Formater {
   void PrintData(size_t size) {
     PADDLE_ENFORCE_NOT_NULL(data);
     // print float
-    if (dtype.hash_code() == typeid(const float).hash_code()) {
+    if (framework::IsType<const float>(dtype)) {
       Display<float>(size);
-    } else if (dtype.hash_code() == typeid(const double).hash_code()) {
+    } else if (framework::IsType<const double>(dtype)) {
       Display<double>(size);
-    } else if (dtype.hash_code() == typeid(const int).hash_code()) {
+    } else if (framework::IsType<const int>(dtype)) {
       Display<int>(size);
-    } else if (dtype.hash_code() == typeid(const int64_t).hash_code()) {
+    } else if (framework::IsType<const int64_t>(dtype)) {
       Display<int64_t>(size);
-    } else if (dtype.hash_code() == typeid(const bool).hash_code()) {
+    } else if (framework::IsType<const bool>(dtype)) {
       Display<bool>(size);
     } else {
       CLOG << "\tdata: unprintable type: " << dtype.name() << std::endl;
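Note: in the print_op.cc branches above, the const spellings simply mirror the old typeid(const float) comparisons; typeid applied to a type ignores top-level cv-qualifiers, so IsType<const float>(dtype) and IsType<float>(dtype) are equivalent. A quick standalone check of that assumption:

    #include <cassert>
    #include <typeindex>
    #include <typeinfo>

    int main() {
      // [expr.typeid]: the result refers to the cv-unqualified type.
      assert(typeid(const float) == typeid(float));
      assert(std::type_index(typeid(const char)) ==
             std::type_index(typeid(char)));
      return 0;
    }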
diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/while_op.cc
index f440058e8db2024f5c8a0129db3af87a80d6e551..733157ea05ed39434b9a750e3a94ea548f512ce6 100644
--- a/paddle/fluid/operators/while_op.cc
+++ b/paddle/fluid/operators/while_op.cc
@@ -17,6 +17,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/lod_tensor_array.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/framework/var_type.h"
 #include "paddle/fluid/operators/detail/safe_ref.h"
 
 namespace paddle {
@@ -135,15 +136,14 @@ class WhileGradOp : public framework::OperatorBase {
         auto &og_inside =
             detail::Ref(cur_scope.Var(inside_og_name),
                         "Cannot find inside gradient %s", inside_og_name);
-        if (og_outside.Type().hash_code() ==
-            typeid(framework::LoDTensor).hash_code()) {
+        if (framework::IsType<framework::LoDTensor>(og_outside.Type())) {
           auto &outside_tensor = og_outside.Get<framework::LoDTensor>();
           auto &inside_tensor =
               detail::Ref(og_inside.GetMutable<framework::LoDTensor>());
           inside_tensor.set_lod(outside_tensor.lod());
           inside_tensor.ShareDataWith(outside_tensor);
-        } else if (og_outside.Type().hash_code() ==
-                   typeid(framework::LoDTensorArray).hash_code()) {
+        } else if (framework::IsType<framework::LoDTensorArray>(
+                       og_outside.Type())) {
           auto &outside_array = og_outside.Get<framework::LoDTensorArray>();
           auto &inside_array =
               detail::Ref(og_inside.GetMutable<framework::LoDTensorArray>());
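Note: one design detail from node.h above: std::type_index has no default constructor, so the patch seeds type_index_ with typeid(NodeAttr) as a sentinel where the old code used std::numeric_limits<size_t>::max(). A simplified standalone sketch of that first-use-fixes-the-type pattern (AnyAttr and its assert-based checks are illustrative stand-ins, not Paddle's NodeAttr/PADDLE_ENFORCE):

    #include <cassert>
    #include <string>
    #include <typeindex>
    #include <typeinfo>

    struct AnyAttr {
      template <typename T>
      T &As() {
        if (data_.empty()) {
          // First access fixes the stored type and sizes the raw storage.
          type_index_ = std::type_index(typeid(T));
          data_.resize(sizeof(T));
        }
        // Every later access must ask for the same type.
        assert(type_index_ == std::type_index(typeid(T)));
        return *reinterpret_cast<T *>(&data_[0]);
      }

     private:
      std::string data_;
      // Sentinel: no real attribute ever stores an AnyAttr.
      std::type_index type_index_{typeid(AnyAttr)};
    };

    int main() {
      AnyAttr attr;
      attr.As<int>() = 42;
      assert(attr.As<int>() == 42);
      return 0;
    }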