Unverified · Commit 8d32cef8 authored by Chen Weihang, committed by GitHub

[PTen] Unify data layout of pten and fluid (#38583)

* unify data layout

* fix test_transfer_layout error
Parent e76087ad
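
For context on the mechanics: fluid's `framework::DataLayout` becomes a `using`-alias of pten's `experimental::DataLayout`, with the old `k`-prefixed names kept as enumerator aliases, so the `TransToPtenDataLayout`/`TransToFluidDataLayout` shims reduce to identity maps and are deleted. A self-contained model of the pattern (simplified namespaces, not the actual Paddle headers; the real definitions live in `paddle/pten/common/layout.h`, shown in the hunks below):

```cpp
// Self-contained model of the pattern used in this commit (simplified
// namespaces; the real definitions live in paddle/pten/common/layout.h):
// one enum, two namespaces, fluid spellings preserved as aliases.
#include <iostream>

namespace experimental {
enum class DataLayout {
  UNDEFINED = 0,
  ANY = UNDEFINED,
  NHWC,
  NCHW,
  MKLDNN,
  // Fluid-style aliases so existing call sites keep compiling unchanged.
  kAnyLayout = ANY,
  kNHWC = NHWC,
  kNCHW = NCHW,
  kMKLDNN = MKLDNN,
};
}  // namespace experimental

namespace framework {
// The whole "unification": fluid re-uses pten's enum via an alias,
// so no TransToPtenDataLayout/TransToFluidDataLayout is needed.
using DataLayout = experimental::DataLayout;
}  // namespace framework

int main() {
  framework::DataLayout fluid_layout = framework::DataLayout::kNCHW;
  experimental::DataLayout pten_layout = fluid_layout;  // plain assignment
  std::cout << static_cast<int>(pten_layout) << std::endl;  // prints 2
  return 0;
}
```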
@@ -234,8 +234,7 @@ class EagerTensor final {
       auto* framework_tensor =
           var_.GetMutable<paddle::framework::LoDTensor>();
       framework_tensor->Resize(tensor_->dims());
-      framework_tensor->set_layout(
-          pten::TransToFluidDataLayout(tensor_->layout()));
+      framework_tensor->set_layout(tensor_->layout());
       // Construct framework::Tensor from egr::EagerTensor
       auto tensor_dense =
           std::dynamic_pointer_cast<pten::DenseTensor>(tensor_->impl());
@@ -18,58 +18,4 @@ limitations under the License. */
 #include <ostream>
 #include <string>
-#include "paddle/fluid/platform/enforce.h"
-
-namespace paddle {
-namespace framework {
-
-enum class DataLayout {
-  kNHWC = 0,
-  kNCHW = 1,
-  kAnyLayout = 2,
-  kMKLDNN = 3,  // all layouts supported by MKLDNN internally
-};
-
-inline DataLayout StringToDataLayout(const std::string& str) {
-  std::string s(str);
-  for (size_t i = 0; i < s.size(); ++i) {
-    s[i] = toupper(s[i]);
-  }
-
-  if (s == "NHWC") {
-    return DataLayout::kNHWC;
-  } else if (s == "NCHW") {
-    return DataLayout::kNCHW;
-  } else if (s == "ANYLAYOUT") {
-    return DataLayout::kAnyLayout;
-  } else if (s == "MKLDNNLAYOUT") {
-    return DataLayout::kMKLDNN;
-  } else {
-    PADDLE_THROW(platform::errors::InvalidArgument(
-        "Unknown data layout type string: %s.", s));
-  }
-}
-
-inline std::string DataLayoutToString(const DataLayout& data_layout) {
-  switch (data_layout) {
-    case DataLayout::kNHWC:
-      return "NHWC";
-    case DataLayout::kNCHW:
-      return "NCHW";
-    case DataLayout::kAnyLayout:
-      return "ANY_LAYOUT";
-    case DataLayout::kMKLDNN:
-      return "MKLDNNLAYOUT";
-    default:
-      PADDLE_THROW(platform::errors::InvalidArgument(
-          "Unknown Data Layout type %d.", data_layout));
-  }
-}
-
-inline std::ostream& operator<<(std::ostream& out, const DataLayout& l) {
-  out << DataLayoutToString(l);
-  return out;
-}
-
-}  // namespace framework
-}  // namespace paddle
+
+#include "paddle/pten/common/layout.h"
@@ -60,7 +60,7 @@ OpKernelType TransPtenKernelKeyToOpKernelType(
   proto::VarType::Type data_type =
       pten::TransToProtoVarType(kernel_key.dtype());
   platform::Place place = pten::TransToFluidPlace(kernel_key.backend());
-  DataLayout data_layout = pten::TransToFluidDataLayout(kernel_key.layout());
+  DataLayout data_layout = kernel_key.layout();
   LibraryType library_type = LibraryType::kPlain;
   if (kernel_key.backend() == pten::Backend::MKLDNN) {
     library_type = LibraryType::kMKLDNN;
@@ -83,8 +83,7 @@ pten::KernelKey TransOpKernelTypeToPtenKernelKey(
   } else {
     // do
   }
-  paddle::experimental::DataLayout layout =
-      pten::TransToPtenDataLayout(kernel_type.data_layout_);
+  paddle::experimental::DataLayout layout = kernel_type.data_layout_;
   paddle::experimental::DataType dtype =
       pten::TransToPtenDataType(kernel_type.data_type_);
   return pten::KernelKey(backend, layout, dtype);
@@ -385,8 +385,7 @@ class ReshapeKernel {
     // We can't MakePtenDenseTensor for case 2, so we solve this case by
     // creating a temporary tensor here:
     pten::DenseTensorMeta meta{pten::TransToPtenDataType(in->type()),
-                               in->dims(),
-                               pten::TransToPtenDataLayout(in->layout())};
+                               in->dims(), in->layout()};
     auto pt_out_tmp = std::make_shared<pten::DenseTensor>(
         pten::make_intrusive<paddle::experimental::SharedStorage>(
             ctx.GetPlace()),
@@ -40,7 +40,7 @@ class TransferLayoutOp : public framework::OperatorWithKernel {
     OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "TransferLayout");
     auto dst_layout = ctx->Attrs().Get<int>("dst_layout");
-    auto low_bound = static_cast<int>(framework::DataLayout::kNHWC);
+    auto low_bound = static_cast<int>(framework::DataLayout::kAnyLayout);
     auto upper_bound = static_cast<int>(framework::DataLayout::kMKLDNN);
     PADDLE_ENFORCE_GE(
         dst_layout, low_bound,
@@ -106,7 +106,7 @@ class TransferLayoutOpProtoMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("X", "(LoDTensor) The input Tensor");
     AddOutput("Out", "(LoDTensor) The Output Tensor with desired layout");
     AddAttr<int>("dst_layout",
-                 "kNHWC = 0, kNCHW = 1, kAnyLayout = 2, kMKLDNN = 3");
+                 "kAnyLayout = 0, kNHWC = 1, kNCHW = 2, kMKLDNN = 3");
     AddComment(R"DOC(
     TransferLayout Operator)DOC");
   }
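
The bound and comment changes above follow from the unified enum's ordering, in which `kAnyLayout` aliases `UNDEFINED`/`ANY`. A sketch pinning down the new integer mapping (assuming the pten header path from this diff):

```cpp
// The dst_layout attribute is now interpreted against the unified enum:
// kAnyLayout aliases UNDEFINED/ANY (= 0), so the valid range becomes
// [kAnyLayout, kMKLDNN] instead of [kNHWC, kMKLDNN].
#include "paddle/pten/common/layout.h"

using paddle::framework::DataLayout;
static_assert(static_cast<int>(DataLayout::kAnyLayout) == 0, "");
static_assert(static_cast<int>(DataLayout::kNHWC) == 1, "");
static_assert(static_cast<int>(DataLayout::kNCHW) == 2, "");
static_assert(static_cast<int>(DataLayout::kMKLDNN) == 3, "");
```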
@@ -66,7 +66,7 @@ class TransferLayoutFunctor {
       // Just set layout/format. No real transform occurs
       auto out_format = platform::MKLDNNFormatForSize(
-          in_tensor.dims().size(), ToMKLDNNFormat(in_layout));
+          in_tensor.dims().size(), framework::ToMKLDNNFormat(in_layout));
       out_tensor.ShareDataWith(in_tensor);
       // For NHWC data we need reshape of tensors as MKL-DNN
       // is expecting NHWC dims description order
@@ -155,14 +155,24 @@ struct PADDLE_ALIGN(2) bfloat16 {
   // Conversion operators
   HOSTDEVICE inline explicit operator float() const {
+#ifdef PADDLE_WITH_HIP
+    uint32_t res = 0;
+    // We should be using memcpy in order to respect the strict aliasing rule
+    // but it fails in the HIP environment.
+    uint16_t temp = x;
+    uint16_t* temp_ptr = reinterpret_cast<uint16_t*>(&temp);
+    res = *temp_ptr;
+    return res;
+#else
 #ifdef PADDLE_CUDA_BF16
     return __bfloat162float(*reinterpret_cast<const __nv_bfloat16*>(&x));
 #else
     float val = 0.f;
     uint16_t temp = x;
-    memcpy(reinterpret_cast<char*>(&val) + 2, reinterpret_cast<char*>(&temp),
-           2);
+    std::memcpy(reinterpret_cast<char*>(&val) + 2,
+                reinterpret_cast<char*>(&temp), 2);
     return val;
 #endif
 #endif
   }
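
As an aside, the CPU fallback above can be exercised standalone. A minimal sketch, assuming a little-endian host and a hypothetical helper name:

```cpp
// Standalone sketch of the CPU fallback above: bfloat16 stores the upper
// 16 bits of an IEEE-754 binary32 value, so converting back to float means
// copying those bits into the top two bytes of a zeroed float
// (little-endian byte order assumed, matching the memcpy offset above).
#include <cstdint>
#include <cstring>
#include <iostream>

float BFloat16BitsToFloat(uint16_t bits) {
  float val = 0.f;
  std::memcpy(reinterpret_cast<char*>(&val) + 2, &bits, 2);
  return val;
}

int main() {
  // 0x3F80 is bfloat16 for 1.0f (float 1.0f has the bit pattern 0x3F800000).
  std::cout << BFloat16BitsToFloat(0x3F80) << std::endl;  // prints 1
  return 0;
}
```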
@@ -36,7 +36,7 @@ std::unique_ptr<pten::DenseTensor> MakePtenDenseTensor(
   VLOG(3) << "MakePtenDenseTensor based Tensor.";
   pten::DenseTensorMeta meta{pten::TransToPtenDataType(src.type()),
                              src.dims(),
-                             pten::TransToPtenDataLayout(src.layout()),
+                             src.layout(),
                              src.offset()};
   auto shared_storage = pten::make_intrusive<SharedStorage>(src.Holder());
   return std::make_unique<pten::DenseTensor>(std::move(shared_storage),
@@ -54,10 +54,8 @@ std::unique_ptr<pten::DenseTensor> MakePtenDenseTensor(
 std::unique_ptr<pten::DenseTensor> MakePtenDenseTensor(
     const paddle::framework::Tensor& src, const pten::TensorArgDef& arg_def) {
-  pten::DenseTensorMeta meta{arg_def.dtype,
-                             src.dims(),
-                             pten::TransToPtenDataLayout(src.layout()),
-                             src.offset()};
+  pten::DenseTensorMeta meta{
+      arg_def.dtype, src.dims(), src.layout(), src.offset()};

   if (src.IsInitialized() &&
       src.place() == pten::TransToFluidPlace(arg_def.backend)) {
@@ -348,7 +346,7 @@ void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
   auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
   meta->dims = src.dims();
   meta->dtype = pten::TransToPtenDataType(src.type());
-  meta->layout = pten::TransToPtenDataLayout(src.layout());
+  meta->layout = src.layout();
   meta->offset = src.offset();
   auto* shared_storage = static_cast<SharedStorage*>(
@@ -380,7 +378,7 @@ void ReMakePtenDenseTensorByArgDef(const paddle::framework::Tensor& src,
   auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
   meta->dims = src.dims();
   meta->dtype = arg_def.dtype;
-  meta->layout = pten::TransToPtenDataLayout(src.layout());
+  meta->layout = src.layout();
   meta->offset = src.offset();
   auto* shared_storage = static_cast<SharedStorage*>(
@@ -18,6 +18,8 @@ limitations under the License. */
 namespace paddle {
 namespace experimental {

+// Note: Here DataLayout is a public API for external users; the prefix `k`
+// may confuse users, so we use all-uppercase names
 enum class DataLayout {
   UNDEFINED = 0,
   // TODO(chenweihang): keep ANY for compatibility, remove it later
@@ -26,28 +28,67 @@ enum class DataLayout {
   NCHW,
   MKLDNN,
   NUM_DATA_LAYOUTS,
-  // See Note [ Why we need ALL in baisc kernel key member? ]
+  // See Note [ Why we need ALL in basic kernel key member? ]
   ALL_LAYOUT = UNDEFINED,
+  // Note: Unify pten DataLayout and fluid::framework::DataLayout;
+  // for compatibility with fluid DataLayout, the `k`-prefixed aliases
+  // are needed here.
+  // Note: The original `kAnyLayout` (enum value 2) was a strange design:
+  // `kAnyLayout` could not represent any particular layout,
+  // yet at the same time it could stand for any layout.
+  // Strictly, it means "default" or "undefined" layout,
+  // and should not be mixed with other meaningful layouts.
+  kAnyLayout = ANY,
+  kNHWC = NHWC,
+  kNCHW = NCHW,
+  kMKLDNN = MKLDNN,  // all layouts supported by MKLDNN internally
 };

-inline std::ostream& operator<<(std::ostream& os, DataLayout layout) {
+}  // namespace experimental
+
+// In order to be compatible with the fluid implementation
+namespace framework {
+
+using DataLayout = paddle::experimental::DataLayout;
+
+inline DataLayout StringToDataLayout(const std::string& str) {
+  std::string s(str);
+  for (size_t i = 0; i < s.size(); ++i) {
+    s[i] = toupper(s[i]);
+  }
+
+  if (s == "NHWC") {
+    return DataLayout::kNHWC;
+  } else if (s == "NCHW") {
+    return DataLayout::kNCHW;
+  } else if (s == "ANYLAYOUT") {
+    return DataLayout::kAnyLayout;
+  } else if (s == "MKLDNNLAYOUT") {
+    return DataLayout::kMKLDNN;
+  } else {
+    PD_THROW("Unknown data layout type string: ", s, ".");
+  }
+}
+
+inline std::string DataLayoutToString(const DataLayout& layout) {
   switch (layout) {
-    case DataLayout::UNDEFINED:
-      os << "Undefined";
-      break;
-    case DataLayout::NHWC:
-      os << "NHWC";
-      break;
-    case DataLayout::NCHW:
-      os << "NCHW";
-      break;
-    case DataLayout::MKLDNN:
-      os << "MKLDNN";
-      break;
+    case DataLayout::kNHWC:
+      return "NHWC";
+    case DataLayout::kNCHW:
+      return "NCHW";
+    case DataLayout::kAnyLayout:
+      return "Undefined(AnyLayout)";
+    case DataLayout::kMKLDNN:
+      return "MKLDNN";
     default:
-      PD_THROW(
-          "Invalid enum data layout type `", static_cast<int>(layout), "`.");
+      PD_THROW("Unknown Data Layout type ", static_cast<int>(layout), ".");
   }
 }
+
+}  // namespace framework
+
+namespace experimental {
+
+inline std::ostream& operator<<(std::ostream& os, DataLayout layout) {
+  os << framework::DataLayoutToString(layout);
+  return os;
+}
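
A usage sketch for the relocated helpers (assuming the header path from this diff; note the changed round-trip string for `kAnyLayout`):

```cpp
#include <cassert>
#include "paddle/pten/common/layout.h"

int main() {
  using paddle::framework::DataLayout;
  // StringToDataLayout upper-cases its input, so matching is case-insensitive.
  DataLayout layout = paddle::framework::StringToDataLayout("nhwc");
  assert(layout == DataLayout::kNHWC);
  assert(paddle::framework::DataLayoutToString(layout) == "NHWC");
  // kAnyLayout now prints as "Undefined(AnyLayout)" rather than the old
  // fluid spelling "ANY_LAYOUT".
  assert(paddle::framework::DataLayoutToString(DataLayout::kAnyLayout) ==
         "Undefined(AnyLayout)");
  return 0;
}
```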
@@ -63,21 +63,6 @@ paddle::experimental::DataType TransToPtenDataType(
   }
 }

-DataLayout TransToPtenDataLayout(const paddle::framework::DataLayout& layout) {
-  switch (layout) {
-    case paddle::framework::DataLayout::kNHWC:
-      return DataLayout::NHWC;
-    case paddle::framework::DataLayout::kNCHW:
-      return DataLayout::NCHW;
-    case paddle::framework::DataLayout::kAnyLayout:
-      return DataLayout::ANY;
-    case paddle::framework::DataLayout::kMKLDNN:
-      return DataLayout::MKLDNN;
-    default:
-      return DataLayout::UNDEFINED;
-  }
-}
-
 paddle::platform::Place TransToFluidPlace(const Backend& backend) {
   // TODO(chenweihang): add other trans cases later
   switch (backend) {
@@ -141,24 +126,6 @@ paddle::framework::proto::VarType::Type TransToProtoVarType(
   }
 }

-paddle::framework::DataLayout TransToFluidDataLayout(const DataLayout& layout) {
-  switch (layout) {
-    case DataLayout::NHWC:
-      return paddle::framework::DataLayout::kNHWC;
-    case DataLayout::NCHW:
-      return paddle::framework::DataLayout::kNCHW;
-    case DataLayout::ANY:
-      return paddle::framework::DataLayout::kAnyLayout;
-    case DataLayout::MKLDNN:
-      return paddle::framework::DataLayout::kMKLDNN;
-    default:
-      PADDLE_THROW(paddle::platform::errors::Unimplemented(
-          "Unsupported data layout `%s` when casting it into "
-          "paddle data layout.",
-          layout));
-  }
-}
-
 paddle::framework::LoD TransToFluidLoD(const pten::LoD& lod) {
   paddle::framework::LoD out;
   out.reserve(lod.size());
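
Both deleted helpers reduce to identity maps once the two enums are a single type, which is why they can be removed outright rather than rewritten. A sketch of the enumerator-level equivalence (assuming the pten header path):

```cpp
// With one shared enum, the fluid-style aliases and the pten names are the
// same enumerators, so TransToPtenDataLayout/TransToFluidDataLayout would
// simply return their argument.
#include "paddle/pten/common/layout.h"

using paddle::experimental::DataLayout;
static_assert(DataLayout::kNHWC == DataLayout::NHWC, "");
static_assert(DataLayout::kNCHW == DataLayout::NCHW, "");
static_assert(DataLayout::kAnyLayout == DataLayout::ANY, "");
static_assert(DataLayout::kMKLDNN == DataLayout::MKLDNN, "");
```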
@@ -20,7 +20,6 @@ limitations under the License. */
 #include "paddle/pten/core/tensor_meta.h"

 // See Note [ Why still include the fluid headers? ]
-#include "paddle/fluid/framework/data_layout.h"
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/platform/place.h"
@@ -37,12 +36,10 @@ const std::string& TransToPtenKernelName(const std::string& fluid_op_name);
 Backend TransToPtenBackend(const paddle::platform::Place& place);
 DataType TransToPtenDataType(
     const paddle::framework::proto::VarType::Type& dtype);
-DataLayout TransToPtenDataLayout(const paddle::framework::DataLayout& layout);

 paddle::platform::Place TransToFluidPlace(const Backend& backend);

 paddle::framework::proto::VarType::Type TransToProtoVarType(
     const DataType& dtype);
-paddle::framework::DataLayout TransToFluidDataLayout(const DataLayout& layout);

 paddle::framework::LoD TransToFluidLoD(const pten::LoD& lod);
 pten::LoD TransToPtenLoD(const paddle::framework::LoD& lod);
@@ -49,8 +49,7 @@ TEST(tensor_utils, dense_tensor_to_lod_tensor) {
   CHECK(dense_tensor.lod()[0] ==
         static_cast<paddle::framework::Vector<size_t>>((lod_tensor.lod()[0])));
   CHECK(dense_tensor.dtype() == pten::TransToPtenDataType(lod_tensor.type()));
-  CHECK(dense_tensor.layout() ==
-        pten::TransToPtenDataLayout(lod_tensor.layout()));
+  CHECK(dense_tensor.layout() == lod_tensor.layout());
   CHECK(platform::is_cpu_place(lod_tensor.place()));
   CHECK(lod_tensor.data<float>()[0] == 1.0f);
@@ -85,7 +84,7 @@ TEST(tensor_utils, dense_tensor_to_tensor) {
   experimental::MovesStorage(&dense_tensor, &tensor);

   CHECK(dense_tensor.dtype() == pten::TransToPtenDataType(tensor.type()));
-  CHECK(dense_tensor.layout() == pten::TransToPtenDataLayout(tensor.layout()));
+  CHECK(dense_tensor.layout() == tensor.layout());
   CHECK(platform::is_cpu_place(tensor.place()));
   CHECK(tensor.data<float>()[0] == 1.0f);
@@ -25,10 +25,10 @@ namespace tests {
 TEST(DataLayout, OStream) {
   std::ostringstream oss;
   oss << pten::DataLayout::UNDEFINED;
-  EXPECT_EQ(oss.str(), "Undefined");
+  EXPECT_EQ(oss.str(), "Undefined(AnyLayout)");
   oss.str("");
   oss << pten::DataLayout::ANY;
-  EXPECT_EQ(oss.str(), "Undefined");
+  EXPECT_EQ(oss.str(), "Undefined(AnyLayout)");
   oss.str("");
   oss << pten::DataLayout::NHWC;
   EXPECT_EQ(oss.str(), "NHWC");
@@ -43,8 +43,7 @@ TEST(DataLayout, OStream) {
     oss << pten::DataLayout::NUM_DATA_LAYOUTS;
   } catch (const std::exception& exception) {
     std::string ex_msg = exception.what();
-    EXPECT_TRUE(ex_msg.find("Invalid enum data layout type") !=
-                std::string::npos);
+    EXPECT_TRUE(ex_msg.find("Unknown Data Layout type") != std::string::npos);
   }
 }
@@ -30,7 +30,7 @@ class TestTransferLayoutOpkNCHWTokNHWC(OpTest):
         self.inputs = {'X': ipt.astype('float32')}
         self.outputs = {'Out': ipt.transpose([0, 2, 3, 1])}
         self.attrs = {
-            'dst_layout': 0  # kNHWC
+            'dst_layout': 1  # kNHWC
         }
         self.op_type = 'transfer_layout'