未验证 提交 667f88f9 编写于 作者: J Jiabin Yang 提交者: GitHub

Fix/gcc 4.8 ubt link error (#18558)

* test=develop, fix docker with paddle nccl problem

* test=develop, fix/gcc_4.8_ubt_link_error

* test=develop, fix code format
上级 0caa08ea
@@ -15,6 +15,7 @@
 #pragma once
 #include <map>
+#include <unordered_map>
 #include <vector>
 #include "paddle/fluid/framework/op_kernel_type.h"
 #include "paddle/fluid/framework/tensor.h"
@@ -52,11 +53,11 @@ inline DataLayout ToPaddleLayout(const MKLDNNFormat& format) {
 inline MKLDNNDataType ToMKLDNNDataType(proto::VarType::Type type) {
   static std::unordered_map<int, MKLDNNDataType> dict{
-      {DataTypeTrait<float>::DataType, MKLDNNDataType::f32},
-      {DataTypeTrait<int8_t>::DataType, MKLDNNDataType::s8},
-      {DataTypeTrait<uint8_t>::DataType, MKLDNNDataType::u8},
-      {DataTypeTrait<int16_t>::DataType, MKLDNNDataType::s16},
-      {DataTypeTrait<int32_t>::DataType, MKLDNNDataType::s32}};
+      {DataTypeTrait<float>::DataType(), MKLDNNDataType::f32},
+      {DataTypeTrait<int8_t>::DataType(), MKLDNNDataType::s8},
+      {DataTypeTrait<uint8_t>::DataType(), MKLDNNDataType::u8},
+      {DataTypeTrait<int16_t>::DataType(), MKLDNNDataType::s16},
+      {DataTypeTrait<int32_t>::DataType(), MKLDNNDataType::s32}};
   auto iter = dict.find(static_cast<int>(type));
   if (iter != dict.end()) return iter->second;
   return MKLDNNDataType::data_undef;
......
@@ -28,7 +28,9 @@ struct DataTypeTrait {};
 // Stub handle for void
 template <>
 struct DataTypeTrait<void> {
-  constexpr static auto DataType = proto::VarType::RAW;
+  constexpr static proto::VarType::Type DataType() {
+    return proto::VarType::RAW;
+  }
 };

 #define _ForEachDataTypeHelper_(callback, cpp_type, proto_type) \
@@ -48,7 +50,7 @@ struct DataTypeTrait<void> {
 #define DefineDataTypeTrait(cpp_type, proto_type)                           \
   template <>                                                               \
   struct DataTypeTrait<cpp_type> {                                          \
-    constexpr static auto DataType = proto_type;                            \
+    constexpr static proto::VarType::Type DataType() { return proto_type; } \
   }

 _ForEachDataType_(DefineDataTypeTrait);
......
@@ -24,10 +24,10 @@ template <typename T>
 inline const T* Tensor::data() const {
   check_memory_size();
   bool valid =
-      std::is_same<T, void>::value || type_ == DataTypeTrait<T>::DataType;
+      std::is_same<T, void>::value || type_ == DataTypeTrait<T>::DataType();
   PADDLE_ENFORCE(
       valid, "Tensor holds the wrong type, it holds %s, but desires to be %s",
-      DataTypeToString(type_), DataTypeToString(DataTypeTrait<T>::DataType));
+      DataTypeToString(type_), DataTypeToString(DataTypeTrait<T>::DataType()));
   return reinterpret_cast<const T*>(
       reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
@@ -39,10 +39,10 @@ template <typename T>
 inline T* Tensor::data() {
   check_memory_size();
   bool valid =
-      std::is_same<T, void>::value || type_ == DataTypeTrait<T>::DataType;
+      std::is_same<T, void>::value || type_ == DataTypeTrait<T>::DataType();
   PADDLE_ENFORCE(
       valid, "Tensor holds the wrong type, it holds %s, but desires to be %s",
-      DataTypeToString(type_), DataTypeToString(DataTypeTrait<T>::DataType));
+      DataTypeToString(type_), DataTypeToString(DataTypeTrait<T>::DataType()));
   return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                               offset_);
 }
@@ -59,7 +59,7 @@ template <typename T>
 inline T* Tensor::mutable_data(platform::Place place, size_t requested_size) {
   static_assert(std::is_pod<T>::value, "T must be POD");
   return reinterpret_cast<T*>(
-      mutable_data(place, DataTypeTrait<T>::DataType, requested_size));
+      mutable_data(place, DataTypeTrait<T>::DataType(), requested_size));
 }

 inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
......
@@ -280,13 +280,13 @@ bool NativePaddlePredictor::GetFetch(std::vector<PaddleTensor> *outputs,
     auto type = fetch.type();
     auto output = &(outputs->at(i));
    output->name = fetchs_[idx]->Input("X")[0];
-    if (type == framework::DataTypeTrait<float>::DataType) {
+    if (type == framework::DataTypeTrait<float>::DataType()) {
       GetFetchOne<float>(fetch, output);
       output->dtype = PaddleDType::FLOAT32;
-    } else if (type == framework::DataTypeTrait<int64_t>::DataType) {
+    } else if (type == framework::DataTypeTrait<int64_t>::DataType()) {
       GetFetchOne<int64_t>(fetch, output);
       output->dtype = PaddleDType::INT64;
-    } else if (type == framework::DataTypeTrait<int32_t>::DataType) {
+    } else if (type == framework::DataTypeTrait<int32_t>::DataType()) {
       GetFetchOne<int32_t>(fetch, output);
       output->dtype = PaddleDType::INT32;
     } else {
......
@@ -103,8 +103,8 @@ framework::OpKernelType ConvOp::GetExpectedKernelType(
     library = framework::LibraryType::kMKLDNN;
     layout = framework::DataLayout::kMKLDNN;
     customized_type_value =
-        (input_data_type == framework::DataTypeTrait<int8_t>::DataType ||
-         input_data_type == framework::DataTypeTrait<uint8_t>::DataType)
+        (input_data_type == framework::DataTypeTrait<int8_t>::DataType() ||
+         input_data_type == framework::DataTypeTrait<uint8_t>::DataType())
             ? kConvMKLDNNINT8
             : kConvMKLDNNFP32;
   }
......
@@ -87,10 +87,10 @@ class PriorBoxOp : public framework::OperatorWithKernel {
     auto input_image_type = ctx.Input<framework::Tensor>("Image")->type();
     int customized_type_value =
         framework::OpKernelType::kDefaultCustomizedTypeValue;
-    if (input_image_type == framework::DataTypeTrait<float>::DataType) {
+    if (input_image_type == framework::DataTypeTrait<float>::DataType()) {
       customized_type_value = kPriorBoxFLOAT;
-    } else if (input_image_type ==
-               framework::DataTypeTrait<double>::DataType) {
+    } else if (input_image_type ==
+               framework::DataTypeTrait<double>::DataType()) {
       customized_type_value = kPriorBoxDOUBLE;
     }
     return framework::OpKernelType(input_input_type, ctx.GetPlace(), layout_,
......
@@ -358,13 +358,13 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
       auto dst_dt = unsigned_output
                         ? paddle::framework::ToMKLDNNDataType(
-                              framework::DataTypeTrait<uint8_t>::DataType)
+                              framework::DataTypeTrait<uint8_t>::DataType())
                         : paddle::framework::ToMKLDNNDataType(
-                              framework::DataTypeTrait<int8_t>::DataType);
+                              framework::DataTypeTrait<int8_t>::DataType());

       if (force_fp32_output) {
         dst_dt = paddle::framework::ToMKLDNNDataType(
-            framework::DataTypeTrait<float>::DataType);
+            framework::DataTypeTrait<float>::DataType());
       }

       if (fuse_residual_conn) {
......
@@ -917,7 +917,7 @@ static void SetDstMemoryQuantized(
   auto dst_md = platform::MKLDNNMemDesc(
       {dst_tz}, paddle::framework::ToMKLDNNDataType(
-                    framework::DataTypeTrait<T>::DataType),
+                    framework::DataTypeTrait<T>::DataType()),
       dst_fmt);
   dst_pd.reset(new mkldnn::memory::primitive_desc(dst_md, engine));
   dst_memory.reset(new mkldnn::memory(*dst_pd, to_void_cast<T>(output_data)));
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册