Commit 1efa91dd authored by superjomn

add hash helper function

Parent cd7018bf
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/lite/core/target_wrapper.h"
#include "paddle/fluid/lite/utils/all.h"
namespace paddle {
namespace lite {
size_t Place::hash() const {
  std::hash<int> h;
  // Seed with the target, then fold the remaining fields in one by one.
  size_t hash = h(static_cast<int>(target));
  hash = hash_combine(hash, static_cast<int>(precision));
  hash = hash_combine(hash, static_cast<int>(layout));
  hash = hash_combine(hash, static_cast<int>(device));
  return hash;
}
} // namespace lite
} // namespace paddle
\ No newline at end of file
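A minimal usage sketch (illustrative, not part of the commit): since Place now exposes hash() and operator==, a small adapter functor is enough to key an unordered container on places. PlaceHash and kernels_per_place are hypothetical names.

#include <unordered_map>
#include "paddle/fluid/lite/core/target_wrapper.h"

// Hypothetical adapter that forwards to the Place::hash() defined above.
struct PlaceHash {
  size_t operator()(const paddle::lite::Place& p) const { return p.hash(); }
};

// Count kernels per place; Place::operator== supplies the key equality.
std::unordered_map<paddle::lite::Place, int, PlaceHash> kernels_per_place;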
@@ -14,13 +14,20 @@
#pragma once
#include <iostream>
#include <sstream>
namespace paddle {
namespace lite {
enum class TargetType : int { kHost = 0, kX86, kCUDA, kLastAsPlaceHolder };
enum class PrecisionType : int { kFloat = 0, kInt8, kLastAsPlaceHolder };
enum class DataLayoutType : int { kNCHW = 0, kLastAsPlaceHolder };
enum class TargetType : int {
  kUnk = 0,
  kHost,
  kX86,
  kCUDA,
  kLastAsPlaceHolder
};
enum class PrecisionType : int { kUnk = 0, kFloat, kInt8, kLastAsPlaceHolder };
enum class DataLayoutType : int { kUnk = 0, kNCHW, kLastAsPlaceHolder };
// Some helper macro to get a specific TargetType.
#define TARGET(item__) paddle::lite::TargetType::item__
@@ -30,14 +37,34 @@ enum class DataLayoutType : int { kNCHW = 0, kLastAsPlaceHolder };
#define PRECISION_VAL(item__) static_cast<int>(PRECISION(item__))
#define DATALAYOUT(item__) paddle::lite::DataLayoutType::item__
constexpr const int kNumPrecisions =
    PRECISION_VAL(kLastAsPlaceHolder) - PRECISION_VAL(kFloat);
constexpr const int kNumTargets =
    TARGET_VAL(kLastAsPlaceHolder) - TARGET_VAL(kHost);

static const std::string target2string[] = {"unk", "host", "x86", "cuda"};
static const std::string& TargetToStr(TargetType target) {
  return target2string[static_cast<int>(target)];
}

static const std::string precision2string[] = {"unk", "float", "int8"};
static const std::string& PrecisionToStr(PrecisionType precision) {
  return precision2string[static_cast<int>(precision)];
}

static const std::string datalayout2string[] = {"unk", "NCHW"};
static const std::string& DataLayoutToStr(DataLayoutType x) {
  return datalayout2string[static_cast<int>(x)];
}
/*
 * Place specifies the execution context of a Kernel or of a kernel's
 * inputs/outputs. It makes the analysis of the MIR clearer and more accurate.
*/
struct Place {
TargetType target{TARGET(kHost)};
PrecisionType precision{PRECISION(kFloat)};
DataLayoutType layout{DATALAYOUT(kNCHW)};
TargetType target{TARGET(kUnk)};
PrecisionType precision{PRECISION(kUnk)};
DataLayoutType layout{DATALAYOUT(kUnk)};
short device{0}; // device ID
Place() = default;
@@ -45,31 +72,33 @@ struct Place {
DataLayoutType layout = DATALAYOUT(kNCHW), short device = 0)
: target(target), precision(precision), layout(layout), device(device) {}
  bool is_valid() const {
    return target != TARGET(kUnk) && precision != PRECISION(kUnk) &&
           layout != DATALAYOUT(kUnk);
  }

  size_t hash() const;

  bool operator==(const Place& other) const {
    return target == other.target && precision == other.precision &&
           layout == other.layout && device == other.device;
  }
};
constexpr const int kNumPrecisions =
PRECISION_VAL(kLastAsPlaceHolder) - PRECISION_VAL(kFloat);
constexpr const int kNumTargets =
TARGET_VAL(kLastAsPlaceHolder) - TARGET_VAL(kHost);
static const std::string target2string[] = {"host", "x86", "cuda"};
static const std::string& TargetToStr(TargetType target) {
return target2string[static_cast<int>(target)];
}
static const std::string precision2string[] = {"float", "int8"};
static const std::string& PrecisionToStr(PrecisionType precision) {
return precision2string[static_cast<int>(precision)];
}
  friend bool operator<(const Place& a, const Place& b) {
    if (a.target != b.target) return a.target < b.target;
    if (a.precision != b.precision) return a.precision < b.precision;
    if (a.layout != b.layout) return a.layout < b.layout;
    if (a.device != b.device) return a.device < b.device;
    // Equal places must compare as not-less to keep a strict weak ordering.
    return false;
  }
static const std::string datalayout2string[] = {"NCHW"};
static const std::string& DataLayoutToStr(DataLayoutType x) {
return datalayout2string[static_cast<int>(x)];
}
  std::string DebugString() const {
    std::stringstream os;
    os << TargetToStr(target) << "/" << PrecisionToStr(precision) << "/"
       << DataLayoutToStr(layout);
    return os.str();
  }
};
// Event sync for multi-stream devices like CUDA and OpenCL.
// For the devices without support of stream, leave it empty.
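A short sketch (illustrative, not from the commit) of what the new members enable: operator< lets Place key ordered containers, and DebugString() produces a readable "target/precision/layout" tag. The call below assumes the constructor shown above, with layout and device left at their defaults.

#include <map>
#include <string>
#include "paddle/fluid/lite/core/target_wrapper.h"

void Demo() {
  std::map<paddle::lite::Place, std::string> notes;  // ordered via operator<
  paddle::lite::Place p(TARGET(kX86), PRECISION(kFloat));
  notes[p] = "x86 float kernel";
  // p.DebugString() yields "x86/float/NCHW" with the default layout.
}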
@@ -16,5 +16,6 @@
#include "paddle/fluid/lite/utils/check.h"
#include "paddle/fluid/lite/utils/factory.h"
#include "paddle/fluid/lite/utils/hash.h"
#include "paddle/fluid/lite/utils/macros.h"
#include "paddle/fluid/lite/utils/varient.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <functional>
namespace paddle {
namespace lite {
// Combine an existing hash value `s` with the hash of `v`; 0x9e3779b9 is the
// 32-bit golden-ratio constant familiar from boost::hash_combine.
template <typename T>
inline size_t hash_combine(size_t s, const T& v) {
  std::hash<T> h;
  return (s ^ h(v)) + 0x9e3779b9 + (s << 6) + (s >> 2);
}
} // namespace lite
} // namespace paddle
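A quick illustration (not part of the commit) of chaining hash_combine to fold several fields into one hash, mirroring what Place::hash() does; HashIdName is a hypothetical example function.

#include <functional>
#include <string>
#include "paddle/fluid/lite/utils/hash.h"

size_t HashIdName(int id, const std::string& name) {
  size_t seed = std::hash<int>()(id);             // seed from the first field
  return paddle::lite::hash_combine(seed, name);  // fold in the second field
}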
@@ -14,6 +14,7 @@
#include "paddle/fluid/lite/core/target_wrapper.h"
#include <algorithm>
#include "paddle/fluid/lite/utils/all.h"
namespace paddle {
namespace lite {