diff --git a/paddle/fluid/lite/core/target_wrapper.cc b/paddle/fluid/lite/core/target_wrapper.cc
new file mode 100644
index 0000000000000000000000000000000000000000..176a2cb2472da3a0cb8c4b81a17273d014020d42
--- /dev/null
+++ b/paddle/fluid/lite/core/target_wrapper.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/core/target_wrapper.h"
+#include "paddle/fluid/lite/utils/all.h"
+
+namespace paddle {
+namespace lite {
+
+size_t Place::hash() const {
+  std::hash<int> h;
+  size_t hash = h(static_cast<int>(target));
+  hash = hash_combine(hash, static_cast<int>(precision));
+  hash = hash_combine(hash, static_cast<int>(layout));
+  hash = hash_combine(hash, static_cast<int>(device));
+  return hash;
+}
+
+}  // namespace lite
+}  // namespace paddle
\ No newline at end of file
diff --git a/paddle/fluid/lite/core/target_wrapper.h b/paddle/fluid/lite/core/target_wrapper.h
index 90c701c340206e05e7dbe853ff161706e53430e3..7713938774af150d1c954f0bf1e91d50620829b7 100644
--- a/paddle/fluid/lite/core/target_wrapper.h
+++ b/paddle/fluid/lite/core/target_wrapper.h
@@ -14,13 +14,20 @@
 
 #pragma once
 #include <iostream>
+#include <sstream>
 
 namespace paddle {
 namespace lite {
 
-enum class TargetType : int { kHost = 0, kX86, kCUDA, kLastAsPlaceHolder };
-enum class PrecisionType : int { kFloat = 0, kInt8, kLastAsPlaceHolder };
-enum class DataLayoutType : int { kNCHW = 0, kLastAsPlaceHolder };
+enum class TargetType : int {
+  kUnk = 0,
+  kHost,
+  kX86,
+  kCUDA,
+  kLastAsPlaceHolder
+};
+enum class PrecisionType : int { kUnk = 0, kFloat, kInt8, kLastAsPlaceHolder };
+enum class DataLayoutType : int { kUnk = 0, kNCHW, kLastAsPlaceHolder };
 
 // Some helper macro to get a specific TargetType.
 #define TARGET(item__) paddle::lite::TargetType::item__
@@ -30,14 +37,34 @@ enum class DataLayoutType : int { kNCHW = 0, kLastAsPlaceHolder };
 #define PRECISION_VAL(item__) static_cast<int>(PRECISION(item__))
 #define DATALAYOUT(item__) paddle::lite::DataLayoutType::item__
 
+constexpr const int kNumPrecisions =
+    PRECISION_VAL(kLastAsPlaceHolder) - PRECISION_VAL(kFloat);
+constexpr const int kNumTargets =
+    TARGET_VAL(kLastAsPlaceHolder) - TARGET_VAL(kHost);
+
+static const std::string target2string[] = {"unk", "host", "x86", "cuda"};
+static const std::string& TargetToStr(TargetType target) {
+  return target2string[static_cast<int>(target)];
+}
+
+static const std::string precision2string[] = {"unk", "float", "int8"};
+static const std::string& PrecisionToStr(PrecisionType precision) {
+  return precision2string[static_cast<int>(precision)];
+}
+
+static const std::string datalayout2string[] = {"unk", "NCHW"};
+static const std::string& DataLayoutToStr(DataLayoutType x) {
+  return datalayout2string[static_cast<int>(x)];
+}
+
 /*
  * Place specifies the execution context of a Kernel or input/output for a
  * kernel. It is used to make the analysis of the MIR more clear and accurate.
  */
 struct Place {
-  TargetType target{TARGET(kHost)};
-  PrecisionType precision{PRECISION(kFloat)};
-  DataLayoutType layout{DATALAYOUT(kNCHW)};
+  TargetType target{TARGET(kUnk)};
+  PrecisionType precision{PRECISION(kUnk)};
+  DataLayoutType layout{DATALAYOUT(kUnk)};
   short device{0};  // device ID
 
   Place() = default;
@@ -45,31 +72,33 @@
         DataLayoutType layout = DATALAYOUT(kNCHW), short device = 0)
       : target(target), precision(precision), layout(layout), device(device) {}
 
+  bool is_valid() const {
+    return target != TARGET(kUnk) && precision != PRECISION(kUnk) &&
+           layout != DATALAYOUT(kUnk);
+  }
+
+  size_t hash() const;
+
   bool operator==(const Place& other) const {
     return target == other.target && precision == other.precision &&
            layout == other.layout && device == other.device;
   }
-};
-
-constexpr const int kNumPrecisions =
-    PRECISION_VAL(kLastAsPlaceHolder) - PRECISION_VAL(kFloat);
-constexpr const int kNumTargets =
-    TARGET_VAL(kLastAsPlaceHolder) - TARGET_VAL(kHost);
-
-static const std::string target2string[] = {"host", "x86", "cuda"};
-static const std::string& TargetToStr(TargetType target) {
-  return target2string[static_cast<int>(target)];
-}
-static const std::string precision2string[] = {"float", "int8"};
-static const std::string& PrecisionToStr(PrecisionType precision) {
-  return precision2string[static_cast<int>(precision)];
-}
+  friend bool operator<(const Place& a, const Place& b) {
+    if (a.target != b.target) return a.target < b.target;
+    if (a.precision != b.precision) return a.precision < b.precision;
+    if (a.layout != b.layout) return a.layout < b.layout;
+    if (a.device != b.device) return a.device < b.device;
+    return true;
+  }
-
-static const std::string datalayout2string[] = {"NCHW"};
-static const std::string& DataLayoutToStr(DataLayoutType x) {
-  return datalayout2string[static_cast<int>(x)];
-}
+  std::string DebugString() const {
+    std::stringstream os;
+    os << TargetToStr(target) << "/" << PrecisionToStr(precision) << "/"
+       << DataLayoutToStr(layout);
+    return os.str();
+  }
+};
 
 // Event sync for multi-stream devices like CUDA and OpenCL.
 // For the devices without support of stream, leave it empty.
diff --git a/paddle/fluid/lite/utils/all.h b/paddle/fluid/lite/utils/all.h
index 7730bfb90306936ed2286707b94fe85db0c7b66a..d9a4867717e13e56a2ccc55b891e0db07d791265 100644
--- a/paddle/fluid/lite/utils/all.h
+++ b/paddle/fluid/lite/utils/all.h
@@ -16,5 +16,6 @@
 
 #include "paddle/fluid/lite/utils/check.h"
 #include "paddle/fluid/lite/utils/factory.h"
+#include "paddle/fluid/lite/utils/hash.h"
 #include "paddle/fluid/lite/utils/macros.h"
 #include "paddle/fluid/lite/utils/varient.h"
diff --git a/paddle/fluid/lite/utils/hash.h b/paddle/fluid/lite/utils/hash.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1fa3be02e58f0908b108a65431ca1993512c821
--- /dev/null
+++ b/paddle/fluid/lite/utils/hash.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include <functional>
+
+namespace paddle {
+namespace lite {
+
+template <typename T>
+inline size_t hash_combine(size_t s, const T& v) {
+  std::hash<T> h;
+  return (s ^ h(v)) + 0x9e3779b9 + (s << 6) + (s >> 2);
+}
+
+}  // namespace lite
+}  // namespace paddle
diff --git a/paddle/fluid/lite/x86/target_wrapper.cc b/paddle/fluid/lite/x86/target_wrapper.cc
index 83250bcb498b8020eb3d0f417b93080c5aaee61e..3374cdd73c0b57a4743b252e3b4b3cbd59f1e8b5 100644
--- a/paddle/fluid/lite/x86/target_wrapper.cc
+++ b/paddle/fluid/lite/x86/target_wrapper.cc
@@ -14,6 +14,7 @@
 
 #include "paddle/fluid/lite/core/target_wrapper.h"
 #include <algorithm>
+#include "paddle/fluid/lite/utils/all.h"
 
 namespace paddle {
 namespace lite {