diff --git a/paddle/pten/api/lib/utils/tensor_utils.cc b/paddle/pten/api/lib/utils/tensor_utils.cc
index b02392e5763be0759589caf9f73f903e92a4c6c2..4936006d26f8aa99fb00b4e95e74fd74514d6abe 100644
--- a/paddle/pten/api/lib/utils/tensor_utils.cc
+++ b/paddle/pten/api/lib/utils/tensor_utils.cc
@@ -146,7 +146,7 @@ void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
   auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
   meta->dims = src.dims();
   // Since the type of DenseTensorMeta is const, const_cast must be used
-  const_cast<DataType&>(meta->type) = pten::TransToPtenDataType(src.type());
+  const_cast<DataType&>(meta->dtype) = pten::TransToPtenDataType(src.type());
   // Since the type of DenseTensorMeta is const, const_cast must be used
   const_cast<DataLayout&>(meta->layout) =
       pten::TransToPtenDataLayout(src.layout());
@@ -164,7 +164,7 @@ void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
   auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
   meta->dims = src.dims();
   // Since the type of DenseTensorMeta is const, const_cast must be used
-  const_cast<DataType&>(meta->type) = pten::TransToPtenDataType(src.type());
+  const_cast<DataType&>(meta->dtype) = pten::TransToPtenDataType(src.type());
   // Since the type of DenseTensorMeta is const, const_cast must be used
   const_cast<DataLayout&>(meta->layout) =
       pten::TransToPtenDataLayout(src.layout());
diff --git a/paddle/pten/core/dense_tensor.cc b/paddle/pten/core/dense_tensor.cc
index b972770f5566869a24299b9c352066e848c5234c..c080b865a51acb09b536eaff0011da3bcfc24379 100644
--- a/paddle/pten/core/dense_tensor.cc
+++ b/paddle/pten/core/dense_tensor.cc
@@ -80,8 +80,8 @@ T* DenseTensor::mutable_data() {
   // In order to be compatible with the original Tensor design and
   // execution system, we have to reset the datatype in mutable_data.
   // When the compatibility phase is over in the future, we can delete it
-  if (meta_.type == DataType::UNDEFINED) {
-    const_cast<DataType&>(meta_.type) =
+  if (meta_.dtype == DataType::UNDEFINED) {
+    const_cast<DataType&>(meta_.dtype) =
         paddle::experimental::CppTypeToDataType<T>::Type();
   }
   PADDLE_ENFORCE(
diff --git a/paddle/pten/core/dense_tensor.h b/paddle/pten/core/dense_tensor.h
index 9d6d05551a177aee022a7e4faefd206e817a2321..42fed722d0d5c6869a57e07492ae47ead4667df7 100644
--- a/paddle/pten/core/dense_tensor.h
+++ b/paddle/pten/core/dense_tensor.h
@@ -90,7 +90,7 @@ class DenseTensor : public TensorBase,
 
   /// \brief Returns the data type of the tensor.
   /// \return The data type of the tensor.
-  DataType dtype() const noexcept override { return meta_.type; }
+  DataType dtype() const noexcept override { return meta_.dtype; }
 
   /// \brief Returns the data layout of the tensor.
   /// \return The data layout of the tensor.
diff --git a/paddle/pten/core/tensor_meta.cc b/paddle/pten/core/tensor_meta.cc
index ebdcd9b5f250b8949059043a3877565a7989ef1f..3e06508be69d6574fdd93dc5d28afe3bd9c1827e 100644
--- a/paddle/pten/core/tensor_meta.cc
+++ b/paddle/pten/core/tensor_meta.cc
@@ -16,23 +16,23 @@ limitations under the License. */
 
 namespace pten {
 
-DenseTensorMeta::DenseTensorMeta(DataType type, const DDim& dims)
-    : dims(dims), type(type) {}
+DenseTensorMeta::DenseTensorMeta(DataType dtype, const DDim& dims)
+    : dims(dims), dtype(dtype) {}
 
-DenseTensorMeta::DenseTensorMeta(DataType type,
+DenseTensorMeta::DenseTensorMeta(DataType dtype,
                                  const DDim& dims,
                                  DataLayout layout)
-    : dims(dims), type(type), layout(layout) {}
+    : dims(dims), dtype(dtype), layout(layout) {}
 
-DenseTensorMeta::DenseTensorMeta(DataType type,
+DenseTensorMeta::DenseTensorMeta(DataType dtype,
                                  const DDim& dims,
                                  DataLayout layout,
                                  const std::vector<std::vector<size_t>>& lod)
-    : dims(dims), type(type), layout(layout), lod(lod) {}
+    : dims(dims), dtype(dtype), layout(layout), lod(lod) {}
 
 bool DenseTensorMeta::valid() const noexcept {
   bool valid{true};
-  valid = valid && (type != DataType::UNDEFINED);
+  valid = valid && (dtype != DataType::UNDEFINED);
   valid = valid && (layout != DataLayout::UNDEFINED);
   valid = valid && (is_scalar || product(dims) >= 0);
   return valid;
@@ -41,7 +41,7 @@ bool DenseTensorMeta::valid() const noexcept {
 bool operator==(const DenseTensorMeta& lhs, const DenseTensorMeta& rhs) {
   bool ret = true;
   return ret && (lhs.is_scalar == rhs.is_scalar) && (lhs.dims == rhs.dims) &&
-         (lhs.type == rhs.type) && (lhs.layout == rhs.layout) &&
+         (lhs.dtype == rhs.dtype) && (lhs.layout == rhs.layout) &&
          (lhs.lod == rhs.lod) && (lhs.offset == rhs.offset);
 }
 }  // namespace pten
diff --git a/paddle/pten/core/tensor_meta.h b/paddle/pten/core/tensor_meta.h
index f48d69260b5ee9089ba93bd3cd6a8cda47246fa1..cc02c57a48ba13c89f80a6aeafa149bdffaf6edf 100644
--- a/paddle/pten/core/tensor_meta.h
+++ b/paddle/pten/core/tensor_meta.h
@@ -39,9 +39,9 @@ struct DenseTensorMeta {
   using DataLayout = paddle::experimental::DataLayout;
 
   DenseTensorMeta() = default;
-  DenseTensorMeta(DataType type, const DDim& dims);
-  DenseTensorMeta(DataType type, const DDim& dims, DataLayout layout);
-  DenseTensorMeta(DataType type,
+  DenseTensorMeta(DataType dtype, const DDim& dims);
+  DenseTensorMeta(DataType dtype, const DDim& dims, DataLayout layout);
+  DenseTensorMeta(DataType dtype,
                   const DDim& dims,
                   DataLayout layout,
                   const std::vector<std::vector<size_t>>& lod);
@@ -54,7 +54,7 @@ struct DenseTensorMeta {
   /// marked with `const` are expected to remain unchanged.
   bool is_scalar{false};
   DDim dims;
-  DataType type{DataType::UNDEFINED};
+  DataType dtype{DataType::UNDEFINED};
   DataLayout layout{DataLayout::NCHW};
   LoD lod;
   size_t offset{0};
diff --git a/paddle/pten/infermeta/binary.cc b/paddle/pten/infermeta/binary.cc
index e124466a6d33afc9310a2850eddecd56378a336a..838e450007fcd458ae97ca0e00bb10dbe2e6c55f 100644
--- a/paddle/pten/infermeta/binary.cc
+++ b/paddle/pten/infermeta/binary.cc
@@ -56,7 +56,7 @@ DenseTensorMeta DotInferShape(const DenseTensorMeta& x_meta,
                         y_dims.to_str()));
 
   x_dims[x_dims.size() - 1] = 1;
-  DenseTensorMeta return_meta(x_meta.type, x_dims, x_meta.layout);
+  DenseTensorMeta return_meta(x_meta.dtype, x_dims, x_meta.layout);
   return return_meta;
 }
 
@@ -127,13 +127,13 @@ DenseTensorMeta MatmulInferShape(const DenseTensorMeta& x_meta,
 
   auto ddim_out = paddle::framework::make_ddim(new_dims);
 
-  return {x_meta.type, ddim_out, x_meta.layout};
+  return {x_meta.dtype, ddim_out, x_meta.layout};
 }
 
 DenseTensorMeta ElementwiseInferShape(const DenseTensorMeta& x_meta,
                                       const DenseTensorMeta& y_meta,
                                       int axis) {
-  DenseTensorMeta return_meta(x_meta.type, x_meta.dims, x_meta.layout);
+  DenseTensorMeta return_meta(x_meta.dtype, x_meta.dims, x_meta.layout);
   if (x_meta.dims != y_meta.dims) {
     auto x_dims = x_meta.dims;
     auto y_dims = y_meta.dims;
diff --git a/paddle/pten/infermeta/unary.cc b/paddle/pten/infermeta/unary.cc
index 5099984886cce5d31b4a26a445f6cc2b01660b61..ea6e97db3460d9ef63dc3c16111598a59e5be29c 100644
--- a/paddle/pten/infermeta/unary.cc
+++ b/paddle/pten/infermeta/unary.cc
@@ -23,7 +23,7 @@ DenseTensorMeta UnchangedInferShape(const DenseTensorMeta& x_meta) {
 
 DenseTensorMeta ReductionInferShape(const DenseTensorMeta& x_meta) {
   const auto& out_dims = paddle::framework::make_ddim({1});
-  DenseTensorMeta return_meta(x_meta.type, out_dims, x_meta.layout);
+  DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout);
   return return_meta;
 }
 
@@ -63,7 +63,7 @@ DenseTensorMeta FlattenInferShape(const DenseTensorMeta& x_meta,
     out_shape.push_back(x_dims[i]);
   }
   const auto& out_dims = paddle::framework::make_ddim(out_shape);
-  DenseTensorMeta return_meta(x_meta.type, out_dims, x_meta.layout);
+  DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout);
 
   if (x_dims[0] == return_meta.dims[0]) {
     // Only pass LoD when the first dimension of output and Input(X)
@@ -77,7 +77,7 @@ DenseTensorMeta FlattenInferShape(const DenseTensorMeta& x_meta,
 DenseTensorMeta FullLikeInferShape(const DenseTensorMeta& x_meta,
                                    DataType dtype,
                                    DataLayout layout) {
-  return {dtype == DataType::UNDEFINED ? x_meta.type : dtype,
+  return {dtype == DataType::UNDEFINED ? x_meta.dtype : dtype,
           x_meta.dims,
           layout == DataLayout::UNDEFINED ? x_meta.layout : layout};
 }
@@ -211,7 +211,7 @@ DenseTensorMeta InferShapeFromVecValue(const DenseTensorMeta& x_meta,
                         "But received 'shape' is empty."));
   auto x_dims = x_meta.dims;
   auto out_dims = ValidateShape(shape, x_dims);
-  DenseTensorMeta return_meta(x_meta.type, out_dims, x_meta.layout);
+  DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout);
   if (x_dims[0] == return_meta.dims[0]) {
     // Only pass LoD when the first dimension of output and Input(X)
     // are the same.
diff --git a/paddle/pten/tests/core/test_dense_tensor.cc b/paddle/pten/tests/core/test_dense_tensor.cc
index 6e7bfede06c180cfab89f014ad7c6fd044424a13..2879a429d9b8260d85a1eef24cdb0f76371a94f6 100644
--- a/paddle/pten/tests/core/test_dense_tensor.cc
+++ b/paddle/pten/tests/core/test_dense_tensor.cc
@@ -31,32 +31,32 @@ TEST(dense_tensor, meta) {
   CHECK(!meta_0.valid());
 
   DenseTensorMeta meta_1(dtype, dims);
-  CHECK(meta_1.type == dtype);
+  CHECK(meta_1.dtype == dtype);
   CHECK(meta_1.dims == dims);
   CHECK(meta_1.valid());
 
   DenseTensorMeta meta_2(dtype, dims, layout);
-  CHECK(meta_2.type == dtype);
+  CHECK(meta_2.dtype == dtype);
   CHECK(meta_2.dims == dims);
   CHECK(meta_2.layout == layout);
   CHECK(meta_2.valid());
 
   DenseTensorMeta meta_3(dtype, dims, layout, lod);
-  CHECK(meta_3.type == dtype);
+  CHECK(meta_3.dtype == dtype);
   CHECK(meta_3.dims == dims);
   CHECK(meta_3.layout == layout);
   CHECK(meta_3.lod == lod);
   CHECK(meta_3.valid());
 
   DenseTensorMeta meta_4(meta_3);
-  CHECK(meta_4.type == dtype);
+  CHECK(meta_4.dtype == dtype);
   CHECK(meta_4.dims == dims);
   CHECK(meta_4.layout == layout);
   CHECK(meta_4.lod == lod);
   CHECK(meta_4.valid());
 
   DenseTensorMeta meta_5(std::move(meta_4));
-  CHECK(meta_5.type == dtype);
+  CHECK(meta_5.dtype == dtype);
   CHECK(meta_5.dims == dims);
   CHECK(meta_5.layout == layout);
   CHECK(meta_5.lod == lod);
@@ -82,7 +82,7 @@ TEST(dense_tensor, ctor) {
   bool r{true};
   r = r && (t.numel() == product(m.dims));
   r = r && (t.dims() == m.dims);
-  r = r && (t.dtype() == m.type);
+  r = r && (t.dtype() == m.dtype);
   r = r && (t.layout() == m.layout);
   r = r && (t.place() == paddle::platform::CPUPlace());
   r = r && t.initialized();
diff --git a/paddle/pten/tests/kernels/test_dot_dev_api.cc b/paddle/pten/tests/kernels/test_dot_dev_api.cc
index 2276d49590a701b9103e4ede9a693f37c6b58d44..5485ef2843c2ca7ab7567f0f95ca0c91e9ddbb8f 100644
--- a/paddle/pten/tests/kernels/test_dot_dev_api.cc
+++ b/paddle/pten/tests/kernels/test_dot_dev_api.cc
@@ -62,7 +62,7 @@ TEST(DEV_API, dot) {
   // 3. check result
   ASSERT_EQ(out.dims().size(), 2);
   ASSERT_EQ(out.dims()[0], 3);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
   ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
 
   auto expect_result = sum;
diff --git a/paddle/pten/tests/kernels/test_elementwise_dev_api.cc b/paddle/pten/tests/kernels/test_elementwise_dev_api.cc
index 062021ccc601e0150197d3a6945a9ac8e730906c..c6e0d339915447ba68a65d34dcc1cb92dcdb4644 100644
--- a/paddle/pten/tests/kernels/test_elementwise_dev_api.cc
+++ b/paddle/pten/tests/kernels/test_elementwise_dev_api.cc
@@ -65,7 +65,7 @@ TEST(DEV_API, elementwise_add) {
   // 3. check result
   ASSERT_EQ(dense_out.dims().size(), 2);
   ASSERT_EQ(dense_out.dims()[0], 3);
-  ASSERT_EQ(dense_out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(dense_out.meta().dtype, pten::DataType::FLOAT32);
   ASSERT_EQ(dense_out.meta().layout, pten::DataLayout::NCHW);
 
   auto expect_result = sum;
diff --git a/paddle/pten/tests/kernels/test_fill_dev_api.cc b/paddle/pten/tests/kernels/test_fill_dev_api.cc
index 6e6af22f6de89001e70d725695bfb102052938be..aa66877881b66c69d7c6548af77a01e23fa9bff3 100644
--- a/paddle/pten/tests/kernels/test_fill_dev_api.cc
+++ b/paddle/pten/tests/kernels/test_fill_dev_api.cc
@@ -50,7 +50,7 @@ TEST(DEV_API, fill_any_like) {
   ASSERT_EQ(out.dims().size(), 2);
   ASSERT_EQ(out.dims()[0], 3);
   ASSERT_EQ(out.numel(), 6);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
   ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
 
   auto* actual_result = out.data<float>();
diff --git a/paddle/pten/tests/kernels/test_flatten_dev_api.cc b/paddle/pten/tests/kernels/test_flatten_dev_api.cc
index b027c75a37b31095d7d57feeb83d4bfbc5722ba4..a9be6108d24b6145e4b5d598e0d2fd8be1883735 100644
--- a/paddle/pten/tests/kernels/test_flatten_dev_api.cc
+++ b/paddle/pten/tests/kernels/test_flatten_dev_api.cc
@@ -56,7 +56,7 @@ TEST(DEV_API, flatten) {
   ASSERT_EQ(out.dims()[1], expect_shape[1]);
   ASSERT_EQ(out.dims()[2], expect_shape[2]);
   ASSERT_EQ(out.numel(), 36);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
   ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
 
   bool value_equal = true;
diff --git a/paddle/pten/tests/kernels/test_mean_dev_api.cc b/paddle/pten/tests/kernels/test_mean_dev_api.cc
index 1ae59ff8034f586ecbf480724c8d2b452d082e32..b16d339e18af344b9020c2bf0a4adaf359b017b4 100644
--- a/paddle/pten/tests/kernels/test_mean_dev_api.cc
+++ b/paddle/pten/tests/kernels/test_mean_dev_api.cc
@@ -49,7 +49,7 @@ TEST(DEV_API, mean) {
   // 3. check result
   ASSERT_EQ(out.dims().size(), 1);
   ASSERT_EQ(out.numel(), 1);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
   ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
 
   auto expect_result = sum / 12;
diff --git a/paddle/pten/tests/kernels/test_reshape_dev_api.cc b/paddle/pten/tests/kernels/test_reshape_dev_api.cc
index c06cc8a8a406bd128a1c4a2e4b2470db50b94ad3..b227d3b009e89a325dd518e85e9071d422d44d7c 100644
--- a/paddle/pten/tests/kernels/test_reshape_dev_api.cc
+++ b/paddle/pten/tests/kernels/test_reshape_dev_api.cc
@@ -54,7 +54,7 @@ TEST(DEV_API, reshape) {
   ASSERT_EQ(out.dims()[0], expect_shape[0]);
   ASSERT_EQ(out.dims()[1], expect_shape[1]);
   ASSERT_EQ(out.numel(), 36);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
   ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
 
   bool value_equal = true;
diff --git a/paddle/pten/tests/kernels/test_scale_dev_api.cc b/paddle/pten/tests/kernels/test_scale_dev_api.cc
index b057821e6cf81afd628d774d307479c55bf2d8ff..b87692137251a9609336247fb63dfeda17c725eb 100644
--- a/paddle/pten/tests/kernels/test_scale_dev_api.cc
+++ b/paddle/pten/tests/kernels/test_scale_dev_api.cc
@@ -56,7 +56,7 @@ TEST(DEV_API, scale) {
   // 3. check result
   ASSERT_EQ(out.dims().size(), 2);
   ASSERT_EQ(out.numel(), 12);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
   ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
 
   auto expect_result = 23;
@@ -101,7 +101,7 @@ TEST(DEV_API, scale_host) {
   // 3. check result
   ASSERT_EQ(out.dims().size(), 2);
   ASSERT_EQ(out.numel(), 12);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
   ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
 
   auto expect_result = 23;
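Usage note (not part of the patch): after this rename, code that builds a DenseTensorMeta or inspects tensor metadata reads the `dtype` member instead of `type`; the `DenseTensor::dtype()` accessor itself is unchanged. A minimal sketch mirroring the updated tests; the variable name `meta` and the dims `{3, 4}` are illustrative assumptions, not taken from this diff:

    // Construct metadata with an explicit data type; layout defaults to NCHW.
    pten::DenseTensorMeta meta(pten::DataType::FLOAT32,
                               paddle::framework::make_ddim({3, 4}));
    CHECK(meta.dtype == pten::DataType::FLOAT32);  // was meta.type before this change
    CHECK(meta.layout == pten::DataLayout::NCHW);
    CHECK(meta.valid());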