From 7777c811b1a9b5dfd86cb97fa347f4a1e6880314 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Mon, 25 Dec 2017 20:44:26 +0800
Subject: [PATCH] "add data layout" (#6955)

* "add data layout"
* "need kernel registry support"
* "fix data layout"
* "reorder include headers"
* "change enum to enum class"
* "fix CI"
---
 paddle/framework/data_layout.h       |  9 +++++----
 paddle/framework/library_type.h      |  8 ++++----
 paddle/framework/op_kernel_type.h    |  1 +
 paddle/framework/tensor.h            | 19 ++++++++++++++++++-
 paddle/framework/tensor_impl.h       |  1 +
 paddle/framework/tensor_test.cc      |  9 +++++++++
 paddle/framework/tensor_util.h       |  2 ++
 paddle/framework/tensor_util_test.cc |  9 ++++++++-
 paddle/pybind/pybind.cc              |  4 ++++
 9 files changed, 52 insertions(+), 10 deletions(-)

diff --git a/paddle/framework/data_layout.h b/paddle/framework/data_layout.h
index 7d7a444cf0..4a8669c3a4 100644
--- a/paddle/framework/data_layout.h
+++ b/paddle/framework/data_layout.h
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
+#include "paddle/platform/enforce.h"
 
 #include
 #include "paddle/platform/enforce.h"
 
@@ -20,7 +21,7 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
-enum DataLayout {
+enum class DataLayout {
   kNHWC = 0,
   kNCHW = 1,
   kAnyLayout = 2,
@@ -38,11 +39,11 @@ inline DataLayout StringToDataLayout(const std::string& str) {
 
 inline std::string DataLayoutToString(const DataLayout& data_layout) {
   switch (data_layout) {
-    case kNHWC:
+    case DataLayout::kNHWC:
       return "NHWC";
-    case kNCHW:
+    case DataLayout::kNCHW:
       return "NCHW";
-    case kAnyLayout:
+    case DataLayout::kAnyLayout:
       return "ANY_LAYOUT";
     default:
       PADDLE_THROW("unknown DataLayout %d", data_layout);
diff --git a/paddle/framework/library_type.h b/paddle/framework/library_type.h
index aa66cf00f3..6baae6c2bb 100644
--- a/paddle/framework/library_type.h
+++ b/paddle/framework/library_type.h
@@ -20,15 +20,15 @@ namespace framework {
 // For more details about the design of LibraryType, Please refer to
 // https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/operator_kernel_type.md#library
 
-enum LibraryType { kPlain = 0, kMKLDNN = 1, kCUDNN = 2 };
+enum class LibraryType { kPlain = 0, kMKLDNN = 1, kCUDNN = 2 };
 
 inline std::string LibraryTypeToString(const LibraryType& library_type) {
   switch (library_type) {
-    case kPlain:
+    case LibraryType::kPlain:
       return "PLAIN";
-    case kMKLDNN:
+    case LibraryType::kMKLDNN:
       return "MKLDNN";
-    case kCUDNN:
+    case LibraryType::kCUDNN:
       return "CUDNN";
     default:
       PADDLE_THROW("unknown LibraryType %d", library_type);
diff --git a/paddle/framework/op_kernel_type.h b/paddle/framework/op_kernel_type.h
index e9c45b958c..97b542e345 100644
--- a/paddle/framework/op_kernel_type.h
+++ b/paddle/framework/op_kernel_type.h
@@ -40,6 +40,7 @@ struct OpKernelType {
   // place, data_type, library_type kinds less than 2^8
   constexpr static int LEFT_SHIFT = 8;
+
   proto::DataType data_type_;
   DataLayout data_layout_;
   platform::Place place_;
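
Before moving on to the Tensor changes, a minimal sketch of what the enum-to-enum-class switch means for callers. Only StringToDataLayout, DataLayoutToString, and the DataLayout enumerators come from the header above; the standalone main and the "NHWC" input are illustrative:

    // Round-trip a layout name through the helpers in data_layout.h.
    #include <iostream>
    #include "paddle/framework/data_layout.h"

    int main() {
      namespace fw = paddle::framework;
      // With enum class, enumerators must be qualified: a bare kNHWC no
      // longer compiles, which is why the switch statements above now
      // spell DataLayout::kNHWC and so on.
      fw::DataLayout layout = fw::StringToDataLayout("NHWC");
      std::cout << fw::DataLayoutToString(layout) << "\n";  // prints NHWC
      return 0;
    }

The same reasoning applies to LibraryType: scoped enumerators no longer leak kPlain, kMKLDNN, and kCUDNN into the enclosing namespace, so DataLayout and LibraryType can both use the k-prefixed naming style without colliding.
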
diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index 6a0c5133c9..b9f6884f7c 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -20,12 +20,12 @@ limitations under the License. */
 
 #include
 #include
+#include "paddle/framework/data_layout.h"
 #include "paddle/framework/ddim.h"
 #include "paddle/memory/memory.h"
 #include "paddle/platform/device_context.h"
 #include "paddle/platform/enforce.h"
 #include "paddle/platform/place.h"
-#include "unsupported/Eigen/CXX11/Tensor"
 
 namespace paddle {
 
@@ -115,6 +115,10 @@ class Tensor {
 
   inline void check_memory_size() const;
 
+  inline DataLayout layout() const { return layout_; }
+
+  inline void set_layout(const DataLayout layout) { layout_ = layout; }
+
  private:
   friend class LoDTensor;
 
@@ -173,6 +177,19 @@ class Tensor {
 
   DDim dims_;
 
+  /**
+   * @brief the layout of the memory block, default is NHWC.
+   *
+   * @note the memory allocation order, i.e. how weight/data is stored.
+   *       For example, a 4-D Tensor (rank = 4) has three commonly used
+   *       layouts: NCHW, NHWC, CHWN. N, C, H, W stand for the batch
+   *       size, the number of feature maps, the height, and the width,
+   *       respectively.
+   */
+
+  DataLayout layout_ = DataLayout::kNHWC;
+
   /**
    * @brief A PlaceHolder may be shared by more than one tensor.
    *
diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h
index 3d93b7808b..6c6f298edc 100644
--- a/paddle/framework/tensor_impl.h
+++ b/paddle/framework/tensor_impl.h
@@ -165,6 +165,7 @@ inline Tensor Tensor::Slice(int begin_idx, int end_idx) const {
   size_t base = numel() / dims_[0];
   Tensor dst;
   dst.holder_ = holder_;
+  dst.set_layout(layout_);
   DDim dst_dims = dims_;
   dst_dims[0] = end_idx - begin_idx;
   dst.Resize(dst_dims);
diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc
index f347981f2e..ca76a9fcb9 100644
--- a/paddle/framework/tensor_test.cc
+++ b/paddle/framework/tensor_test.cc
@@ -200,3 +200,12 @@ TEST(Tensor, ReshapeToMatrix) {
   ASSERT_EQ(res.dims()[0], 2 * 3);
   ASSERT_EQ(res.dims()[1], 4 * 9);
 }
+
+TEST(Tensor, Layout) {
+  using namespace paddle::framework;
+  using namespace paddle::platform;
+  Tensor src;
+  ASSERT_EQ(src.layout(), DataLayout::kNHWC);
+  src.set_layout(DataLayout::kAnyLayout);
+  ASSERT_EQ(src.layout(), DataLayout::kAnyLayout);
+}
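
A short sketch of the new accessors in the spirit of TEST(Tensor, Layout) above; the function name is illustrative. Note that set_layout only retags the metadata, and Slice merely copies the tag onto the new view; neither call rearranges the underlying memory:

    #include "paddle/framework/tensor.h"

    void layout_tag_example() {
      using paddle::framework::DataLayout;
      paddle::framework::Tensor t;
      // A freshly constructed Tensor carries the default tag, kNHWC.
      DataLayout d = t.layout();        // DataLayout::kNHWC
      t.set_layout(DataLayout::kNCHW);  // changes the tag only; no transpose
      d = t.layout();                   // DataLayout::kNCHW
      (void)d;
    }
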
diff --git a/paddle/framework/tensor_util.h b/paddle/framework/tensor_util.h
index ebfb0e5538..692f5f1af7 100644
--- a/paddle/framework/tensor_util.h
+++ b/paddle/framework/tensor_util.h
@@ -33,6 +33,7 @@ inline void CopyFrom(const Tensor& src, const platform::Place& dst_place,
 
   src.check_memory_size();
   dst->Resize(src.dims());
+  dst->set_layout(src.layout());
   auto src_place = src.place();
   auto src_ptr = src.data();
 
@@ -89,6 +90,7 @@ inline void CopyFrom(const Tensor& src, const platform::Place& dst_place,
                      Tensor* dst) {
   src.check_memory_size();
   dst->Resize(src.dims());
+  dst->set_layout(src.layout());
   auto src_place = src.place();
   auto src_ptr = src.data();
 
diff --git a/paddle/framework/tensor_util_test.cc b/paddle/framework/tensor_util_test.cc
index 6fc243aaf6..f388c19f28 100644
--- a/paddle/framework/tensor_util_test.cc
+++ b/paddle/framework/tensor_util_test.cc
@@ -28,6 +28,7 @@ TEST(CopyFrom, Tensor) {
   int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
   memcpy(src_ptr, arr, 9 * sizeof(int));
+  src_tensor.set_layout(DataLayout::kAnyLayout);
 
   auto cpu_place = new platform::CPUPlace();
   CopyFrom(src_tensor, *cpu_place, &dst_tensor);
@@ -38,14 +39,18 @@ TEST(CopyFrom, Tensor) {
     EXPECT_EQ(src_ptr[i], dst_ptr[i]);
   }
+  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());
+
   Tensor slice_tensor = src_tensor.Slice(1, 2);
   CopyFrom(slice_tensor, *cpu_place, &dst_tensor);
 
   const int* slice_ptr = slice_tensor.data<int>();
   dst_ptr = dst_tensor.data<int>();
   ASSERT_NE(dst_ptr, slice_ptr);
   for (size_t i = 0; i < 3; ++i) {
     EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
   }
+  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());
+
 #ifdef PADDLE_WITH_CUDA
   {
     Tensor src_tensor;
@@ -91,6 +96,8 @@ TEST(CopyFrom, Tensor) {
     for (size_t i = 0; i < 3; ++i) {
       EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
     }
+
+    EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());
   }
 #endif
 }
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index 668a48e816..07e38476e6 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -78,6 +78,10 @@ PYBIND11_PLUGIN(core) {
            [](Tensor &self, const std::vector &dim) {
              self.Resize(make_ddim(dim));
            })
+      .def("set_layout",
+           [](Tensor &self, const std::string &layout) {
+             self.set_layout(StringToDataLayout(layout));
+           })
       .def("alloc_float",
            [](Tensor &self, paddle::platform::CUDAPlace &place) {
              self.mutable_data<float>(place);
--
GitLab
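
Taken together, the layout tag now travels with both CopyFrom and Slice, and Python code can set it through the new set_layout binding, which routes the given string through StringToDataLayout. Below is a sketch modelled on tensor_util_test.cc, assuming the two-argument CopyFrom overload shown above and the (DDim, Place) overload of mutable_data used in these tests; the shape and the function name are illustrative:

    #include "paddle/framework/tensor.h"
    #include "paddle/framework/tensor_util.h"

    void copy_preserves_layout() {
      using namespace paddle::framework;
      paddle::platform::CPUPlace cpu;

      Tensor src, dst;
      src.mutable_data<int>(make_ddim({3, 3}), cpu);
      src.set_layout(DataLayout::kAnyLayout);

      // CopyFrom stamps src's layout onto dst along with its dims.
      CopyFrom(src, cpu, &dst);
      // dst.layout() == DataLayout::kAnyLayout

      // Slice propagates the tag to the sliced view as well.
      Tensor slice = src.Slice(1, 2);
      // slice.layout() == DataLayout::kAnyLayout
    }
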