Unverified commit 667f88f9, authored by Jiabin Yang, committed by GitHub

Fix/gcc 4.8 ubt link error (#18558)

* test=develop, fix docker with paddle nccl problem

* test=develop, fix/gcc_4.8_ubt_link_error

* test=develop, fix code format
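Root cause, for the record: under C++11 an in-class `constexpr static` data member is only a declaration, and any odr-use of it (for instance, `std::pair`'s `const&` constructor binding to `DataTypeTrait<T>::DataType` while an `std::unordered_map` initializer list is built) requires a matching out-of-line definition; without one, GCC 4.8 aborts the link with an undefined-reference error. A `constexpr static` member function avoids this entirely, because each call produces a temporary value and odr-uses nothing. A minimal sketch of both patterns, using hypothetical `Trait`/`Type` names rather than the Paddle sources:

// Build with: g++ -std=c++11 sketch.cc
#include <unordered_map>

enum class Type { FP32, INT32 };

template <typename T>
struct Trait;

// The pattern this commit removes: a constexpr static data member. Its
// in-class initializer is only a declaration; once it is odr-used below,
// C++11 requires the out-of-line definition that follows, and GCC 4.8
// otherwise fails with "undefined reference to `Trait<float>::kType'".
template <>
struct Trait<float> {
  constexpr static Type kType = Type::FP32;
};
constexpr Type Trait<float>::kType;  // the definition GCC 4.8 insists on

// The pattern this commit adopts: a constexpr static member function.
// Each call yields a prvalue, so nothing is odr-used and no extra
// definition is needed on any compiler.
template <>
struct Trait<int> {
  constexpr static Type DataType() { return Type::INT32; }
};

int main() {
  std::unordered_map<int, Type> dict{
      {0, Trait<float>::kType},     // binds a const Type& inside std::pair: odr-use
      {1, Trait<int>::DataType()},  // plain value: no odr-use, nothing to define
  };
  return dict.count(0) == 1 ? 0 : 1;
}

From C++17 onward, constexpr static data members are implicitly inline and the separate definition becomes unnecessary; since the codebase still has to build as C++11 on GCC 4.8, the member-function form is the portable fix.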
Parent 0caa08ea
@@ -15,6 +15,7 @@
 #pragma once
 #include <map>
 #include <unordered_map>
+#include <vector>
 #include "paddle/fluid/framework/op_kernel_type.h"
 #include "paddle/fluid/framework/tensor.h"
@@ -52,11 +53,11 @@ inline DataLayout ToPaddleLayout(const MKLDNNFormat& format) {
 inline MKLDNNDataType ToMKLDNNDataType(proto::VarType::Type type) {
   static std::unordered_map<int, MKLDNNDataType> dict{
-      {DataTypeTrait<float>::DataType, MKLDNNDataType::f32},
-      {DataTypeTrait<int8_t>::DataType, MKLDNNDataType::s8},
-      {DataTypeTrait<uint8_t>::DataType, MKLDNNDataType::u8},
-      {DataTypeTrait<int16_t>::DataType, MKLDNNDataType::s16},
-      {DataTypeTrait<int32_t>::DataType, MKLDNNDataType::s32}};
+      {DataTypeTrait<float>::DataType(), MKLDNNDataType::f32},
+      {DataTypeTrait<int8_t>::DataType(), MKLDNNDataType::s8},
+      {DataTypeTrait<uint8_t>::DataType(), MKLDNNDataType::u8},
+      {DataTypeTrait<int16_t>::DataType(), MKLDNNDataType::s16},
+      {DataTypeTrait<int32_t>::DataType(), MKLDNNDataType::s32}};
   auto iter = dict.find(static_cast<int>(type));
   if (iter != dict.end()) return iter->second;
   return MKLDNNDataType::data_undef;
@@ -28,7 +28,9 @@ struct DataTypeTrait {};
 // Stub handle for void
 template <>
 struct DataTypeTrait<void> {
-  constexpr static auto DataType = proto::VarType::RAW;
+  constexpr static proto::VarType::Type DataType() {
+    return proto::VarType::RAW;
+  }
 };
 
 #define _ForEachDataTypeHelper_(callback, cpp_type, proto_type) \
@@ -45,10 +47,10 @@ struct DataTypeTrait<void> {
   _ForEachDataTypeHelper_(callback, int16_t, INT16);            \
   _ForEachDataTypeHelper_(callback, int8_t, INT8)
-#define DefineDataTypeTrait(cpp_type, proto_type) \
-  template <>                                     \
-  struct DataTypeTrait<cpp_type> {                \
-    constexpr static auto DataType = proto_type;  \
-  }
+#define DefineDataTypeTrait(cpp_type, proto_type)                           \
+  template <>                                                               \
+  struct DataTypeTrait<cpp_type> {                                          \
+    constexpr static proto::VarType::Type DataType() { return proto_type; } \
+  }
 
 _ForEachDataType_(DefineDataTypeTrait);
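To make the call-site changes below easier to follow: the rewritten macro turns each trait's DataType from a data member into a function, so an invocation such as DefineDataTypeTrait(float, proto::VarType::FP32) now expands to roughly (illustrative expansion, not a literal file in the tree):

template <>
struct DataTypeTrait<float> {
  constexpr static proto::VarType::Type DataType() { return proto::VarType::FP32; }
};

which is why every use in the remaining hunks changes from DataTypeTrait<T>::DataType to DataTypeTrait<T>::DataType().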
@@ -24,10 +24,10 @@ template <typename T>
 inline const T* Tensor::data() const {
   check_memory_size();
   bool valid =
-      std::is_same<T, void>::value || type_ == DataTypeTrait<T>::DataType;
+      std::is_same<T, void>::value || type_ == DataTypeTrait<T>::DataType();
   PADDLE_ENFORCE(
       valid, "Tensor holds the wrong type, it holds %s, but desires to be %s",
-      DataTypeToString(type_), DataTypeToString(DataTypeTrait<T>::DataType));
+      DataTypeToString(type_), DataTypeToString(DataTypeTrait<T>::DataType()));
   return reinterpret_cast<const T*>(
       reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
@@ -39,10 +39,10 @@ template <typename T>
 inline T* Tensor::data() {
   check_memory_size();
   bool valid =
-      std::is_same<T, void>::value || type_ == DataTypeTrait<T>::DataType;
+      std::is_same<T, void>::value || type_ == DataTypeTrait<T>::DataType();
   PADDLE_ENFORCE(
       valid, "Tensor holds the wrong type, it holds %s, but desires to be %s",
-      DataTypeToString(type_), DataTypeToString(DataTypeTrait<T>::DataType));
+      DataTypeToString(type_), DataTypeToString(DataTypeTrait<T>::DataType()));
   return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                               offset_);
 }
@@ -59,7 +59,7 @@ template <typename T>
 inline T* Tensor::mutable_data(platform::Place place, size_t requested_size) {
   static_assert(std::is_pod<T>::value, "T must be POD");
   return reinterpret_cast<T*>(
-      mutable_data(place, DataTypeTrait<T>::DataType, requested_size));
+      mutable_data(place, DataTypeTrait<T>::DataType(), requested_size));
 }
 
 inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
@@ -280,13 +280,13 @@ bool NativePaddlePredictor::GetFetch(std::vector<PaddleTensor> *outputs,
     auto type = fetch.type();
     auto output = &(outputs->at(i));
     output->name = fetchs_[idx]->Input("X")[0];
-    if (type == framework::DataTypeTrait<float>::DataType) {
+    if (type == framework::DataTypeTrait<float>::DataType()) {
       GetFetchOne<float>(fetch, output);
       output->dtype = PaddleDType::FLOAT32;
-    } else if (type == framework::DataTypeTrait<int64_t>::DataType) {
+    } else if (type == framework::DataTypeTrait<int64_t>::DataType()) {
       GetFetchOne<int64_t>(fetch, output);
       output->dtype = PaddleDType::INT64;
-    } else if (type == framework::DataTypeTrait<int32_t>::DataType) {
+    } else if (type == framework::DataTypeTrait<int32_t>::DataType()) {
       GetFetchOne<int32_t>(fetch, output);
       output->dtype = PaddleDType::INT32;
     } else {
@@ -103,8 +103,8 @@ framework::OpKernelType ConvOp::GetExpectedKernelType(
     library = framework::LibraryType::kMKLDNN;
     layout = framework::DataLayout::kMKLDNN;
     customized_type_value =
-        (input_data_type == framework::DataTypeTrait<int8_t>::DataType ||
-         input_data_type == framework::DataTypeTrait<uint8_t>::DataType)
+        (input_data_type == framework::DataTypeTrait<int8_t>::DataType() ||
+         input_data_type == framework::DataTypeTrait<uint8_t>::DataType())
             ? kConvMKLDNNINT8
             : kConvMKLDNNFP32;
   }
@@ -87,10 +87,10 @@ class PriorBoxOp : public framework::OperatorWithKernel {
     auto input_image_type = ctx.Input<framework::Tensor>("Image")->type();
     int customized_type_value =
         framework::OpKernelType::kDefaultCustomizedTypeValue;
-    if (input_image_type == framework::DataTypeTrait<float>::DataType) {
+    if (input_image_type == framework::DataTypeTrait<float>::DataType()) {
       customized_type_value = kPriorBoxFLOAT;
     } else if (input_image_type ==
-               framework::DataTypeTrait<double>::DataType) {
+               framework::DataTypeTrait<double>::DataType()) {
       customized_type_value = kPriorBoxDOUBLE;
     }
     return framework::OpKernelType(input_input_type, ctx.GetPlace(), layout_,
@@ -358,13 +358,13 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto dst_dt = unsigned_output
                       ? paddle::framework::ToMKLDNNDataType(
-                            framework::DataTypeTrait<uint8_t>::DataType)
+                            framework::DataTypeTrait<uint8_t>::DataType())
                       : paddle::framework::ToMKLDNNDataType(
-                            framework::DataTypeTrait<int8_t>::DataType);
+                            framework::DataTypeTrait<int8_t>::DataType());
 
     if (force_fp32_output) {
       dst_dt = paddle::framework::ToMKLDNNDataType(
-          framework::DataTypeTrait<float>::DataType);
+          framework::DataTypeTrait<float>::DataType());
     }
 
     if (fuse_residual_conn) {
@@ -917,7 +917,7 @@ static void SetDstMemoryQuantized(
   auto dst_md = platform::MKLDNNMemDesc(
       {dst_tz}, paddle::framework::ToMKLDNNDataType(
-                    framework::DataTypeTrait<T>::DataType),
+                    framework::DataTypeTrait<T>::DataType()),
       dst_fmt);
   dst_pd.reset(new mkldnn::memory::primitive_desc(dst_md, engine));
   dst_memory.reset(new mkldnn::memory(*dst_pd, to_void_cast<T>(output_data)));