diff --git a/lite/api/paddle_place.cc b/lite/api/paddle_place.cc
index cb70fd43cdd80713a6a47afb61bbde24e131a5fa..9bc63e78aae92556a312eb36c3415f9d57c2239a 100644
--- a/lite/api/paddle_place.cc
+++ b/lite/api/paddle_place.cc
@@ -24,9 +24,9 @@ namespace lite_api {
 size_t Place::hash() const {
   std::hash<int> h;
   size_t hash = h(static_cast<int>(target));
-  hash = lite::hash_combine(hash, static_cast<int>(precision));
-  hash = lite::hash_combine(hash, static_cast<int>(layout));
-  hash = lite::hash_combine(hash, static_cast<int>(device));
+  lite::CombineHash(static_cast<int>(precision), &hash);
+  lite::CombineHash(static_cast<int>(layout), &hash);
+  lite::CombineHash(static_cast<int>(device), &hash);
   return hash;
 }
 
diff --git a/lite/core/op_lite.cc b/lite/core/op_lite.cc
index de76f404f8a129eb94e645dc731a0d09c1ee3c77..7428a16d0afab9dba126189c19d0eb99f0d8e31d 100644
--- a/lite/core/op_lite.cc
+++ b/lite/core/op_lite.cc
@@ -42,16 +42,14 @@ bool OpLite::InferShapeWithCache() {
     // combined dims value into new_hash value.
     auto &element_dims = (*iter)->dims();
     for (size_t i = 0; i < element_dims.size(); i++) {
-      new_hash =
-          lite::hash_combine(new_hash, static_cast<int64_t>(element_dims[i]));
+      lite::CombineHash(static_cast<int64_t>(element_dims[i]), &new_hash);
     }
     // combine lod value into new_hash valud.
     auto &emement_lods = (*iter)->lod();
     for (auto lod_iter = emement_lods.begin(); lod_iter != emement_lods.end();
          lod_iter++) {
       for (size_t i = 0; i < lod_iter->size(); i++) {
-        new_hash =
-            lite::hash_combine(new_hash, static_cast<int64_t>(lod_iter->at(i)));
+        lite::CombineHash(static_cast<int64_t>(lod_iter->at(i)), &new_hash);
       }
     }
   }
diff --git a/lite/core/type_system.cc b/lite/core/type_system.cc
index 276d0c4a349794bed0ece755c924cf789a7cf54e..aaafd29841f44e671460a4c45babc7a8f663dacf 100644
--- a/lite/core/type_system.cc
+++ b/lite/core/type_system.cc
@@ -21,9 +21,9 @@ namespace lite {
 size_t ParamTypeRegistry::KernelIdTy::hash() const {
   std::hash<std::string> h;
   size_t hash = h(kernel_type);
-  hash = hash_combine(hash, place.hash());
-  hash = hash_combine(hash, std::hash<int>()(static_cast<int>(io)));
-  hash = hash_combine(hash, std::hash<std::string>()(arg_name));
+  lite::CombineHash(place.hash(), &hash);
+  lite::CombineHash(std::hash<int>()(static_cast<int>(io)), &hash);
+  lite::CombineHash(std::hash<std::string>()(arg_name), &hash);
   return hash;
 }
 
@@ -48,8 +48,7 @@ const Type *Type::GetTensorTy(TargetType target,
   // NOTE quite naive implementation here, but not performance sensitive.
   DataType::ID type_id = DataType::ID::Tensor;
 
-#define HASH_ONE(x) v = hash_combine(v, hasher(static_cast<int>(x)))
-
+#define HASH_ONE(x) CombineHash(hasher(static_cast<int>(x)), &v);
   std::hash<int> hasher;
   size_t v = hasher(static_cast<int>(type_id));
   HASH_ONE(target);
@@ -80,8 +79,7 @@ const Type *Type::GetTensorListTy(TargetType target,
   static std::map<size_t, const Type *> type_repo;
   DataType::ID type_id = DataType::ID::TensorList;
 
-#define HASH_ONE(x) v = hash_combine(v, hasher(static_cast<int>(x)))
-
+#define HASH_ONE(x) CombineHash(hasher(static_cast<int>(x)), &v);
   std::hash<int> hasher;
   size_t v = hasher(static_cast<int>(type_id));
   HASH_ONE(target);
diff --git a/lite/utils/hash.h b/lite/utils/hash.h
index a1fa3be02e58f0908b108a65431ca1993512c821..0135b53a8609a2a8168a25727738afbda4398dc7 100644
--- a/lite/utils/hash.h
+++ b/lite/utils/hash.h
@@ -18,10 +18,11 @@ namespace paddle {
 namespace lite {
 
+// A simplified implementation of boost::hash_combine.
 template <class T>
-inline size_t hash_combine(size_t s, const T& v) {
+inline void CombineHash(const T& from, size_t* to) {
   std::hash<T> h;
-  return (s ^ h(v)) + 0x9e3779b9 + (s << 6) + (s >> 2);
+  *to ^= h(from) + 0x9e3779b9 + (*to << 6) + (*to >> 2);
 }
 
 }  // namespace lite