Unverified commit c13edf66, authored by zyfncg and committed by GitHub

【PTen】Rename TensorMeta member type to dtype (#37277)

* rename TensorBase interface data_type() to dtype()

* rename type to dtype of TensorMeta

* merge the code

* merge the code

* fix the problem caused by the merge conflict
Parent 5a000900
@@ -146,7 +146,7 @@ void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
meta->dims = src.dims();
// Since the type of DenseTensorMeta is const, const_cast must be used
-  const_cast<DataType&>(meta->type) = pten::TransToPtenDataType(src.type());
+  const_cast<DataType&>(meta->dtype) = pten::TransToPtenDataType(src.type());
// Since the type of DenseTensorMeta is const, const_cast must be used
const_cast<DataLayout&>(meta->layout) =
pten::TransToPtenDataLayout(src.layout());
@@ -164,7 +164,7 @@ void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
meta->dims = src.dims();
// Since the type of DenseTensorMeta is const, const_cast must be used
-  const_cast<DataType&>(meta->type) = pten::TransToPtenDataType(src.type());
+  const_cast<DataType&>(meta->dtype) = pten::TransToPtenDataType(src.type());
// Since the type of DenseTensorMeta is const, const_cast must be used
const_cast<DataLayout&>(meta->layout) =
pten::TransToPtenDataLayout(src.layout());
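Note: as the in-code comments in the hunks above say, the fields of DenseTensorMeta are treated as immutable after construction, so the compatibility helpers write through them with const_cast. A minimal standalone sketch of that pattern (illustrative only, not PTen code); this is well-defined because the underlying Meta object is not itself const:

    #include <cassert>

    struct Meta {
      int dtype{0};  // stands in for pten::DataType
    };

    // Writing through a const reference requires casting the constness away.
    void set_dtype(const Meta& m, int v) {
      const_cast<int&>(m.dtype) = v;  // OK: m refers to a non-const Meta
    }

    int main() {
      Meta m;
      set_dtype(m, 3);
      assert(m.dtype == 3);
      return 0;
    }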
@@ -80,8 +80,8 @@ T* DenseTensor::mutable_data() {
// In order to be compatible with the original Tensor design and
// execution system, we have to reset the datatype in mutable_data<T>.
// When the compatibility phase is over in the future, we can delete it
-  if (meta_.type == DataType::UNDEFINED) {
-    const_cast<DataType&>(meta_.type) =
+  if (meta_.dtype == DataType::UNDEFINED) {
+    const_cast<DataType&>(meta_.dtype) =
paddle::experimental::CppTypeToDataType<T>::Type();
}
PADDLE_ENFORCE(
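Note: the hunk above preserves the compatibility behaviour of mutable_data<T>: when the meta carries no dtype yet, the first mutable access fixes it from the requested T. A rough standalone sketch of that pattern; the names TypeCode, code_of, and FakeTensor are illustrative, not the PTen API:

    #include <cstdint>
    #include <cstdio>

    enum class TypeCode { UNDEFINED, FLOAT32, INT64 };

    template <typename T> TypeCode code_of();
    template <> TypeCode code_of<float>() { return TypeCode::FLOAT32; }
    template <> TypeCode code_of<std::int64_t>() { return TypeCode::INT64; }

    struct FakeTensor {
      TypeCode dtype{TypeCode::UNDEFINED};

      template <typename T>
      T* mutable_data() {
        if (dtype == TypeCode::UNDEFINED) {
          dtype = code_of<T>();  // first mutable access decides the dtype
        }
        static T storage[8]{};  // placeholder for a real allocation
        return storage;
      }
    };

    int main() {
      FakeTensor t;
      t.mutable_data<float>();
      std::printf("dtype = %d\n", static_cast<int>(t.dtype));  // prints 1
      return 0;
    }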
@@ -90,7 +90,7 @@ class DenseTensor : public TensorBase,
/// \brief Returns the data type of the tensor.
/// \return The data type of the tensor.
-  DataType dtype() const noexcept override { return meta_.type; }
+  DataType dtype() const noexcept override { return meta_.dtype; }
/// \brief Returns the data layout of the tensor.
/// \return The data layout of the tensor.
@@ -16,23 +16,23 @@ limitations under the License. */
namespace pten {
-DenseTensorMeta::DenseTensorMeta(DataType type, const DDim& dims)
-    : dims(dims), type(type) {}
+DenseTensorMeta::DenseTensorMeta(DataType dtype, const DDim& dims)
+    : dims(dims), dtype(dtype) {}
-DenseTensorMeta::DenseTensorMeta(DataType type,
+DenseTensorMeta::DenseTensorMeta(DataType dtype,
const DDim& dims,
DataLayout layout)
-    : dims(dims), type(type), layout(layout) {}
+    : dims(dims), dtype(dtype), layout(layout) {}
-DenseTensorMeta::DenseTensorMeta(DataType type,
+DenseTensorMeta::DenseTensorMeta(DataType dtype,
const DDim& dims,
DataLayout layout,
const std::vector<std::vector<size_t>>& lod)
-    : dims(dims), type(type), layout(layout), lod(lod) {}
+    : dims(dims), dtype(dtype), layout(layout), lod(lod) {}
bool DenseTensorMeta::valid() const noexcept {
bool valid{true};
-  valid = valid && (type != DataType::UNDEFINED);
+  valid = valid && (dtype != DataType::UNDEFINED);
valid = valid && (layout != DataLayout::UNDEFINED);
valid = valid && (is_scalar || product(dims) >= 0);
return valid;
@@ -41,7 +41,7 @@ bool DenseTensorMeta::valid() const noexcept {
bool operator==(const DenseTensorMeta& lhs, const DenseTensorMeta& rhs) {
bool ret = true;
return ret && (lhs.is_scalar == rhs.is_scalar) && (lhs.dims == rhs.dims) &&
-         (lhs.type == rhs.type) && (lhs.layout == rhs.layout) &&
+         (lhs.dtype == rhs.dtype) && (lhs.layout == rhs.layout) &&
(lhs.lod == rhs.lod) && (lhs.offset == rhs.offset);
}
} // namespace pten
@@ -39,9 +39,9 @@ struct DenseTensorMeta {
using DataLayout = paddle::experimental::DataLayout;
DenseTensorMeta() = default;
-  DenseTensorMeta(DataType type, const DDim& dims);
-  DenseTensorMeta(DataType type, const DDim& dims, DataLayout layout);
-  DenseTensorMeta(DataType type,
+  DenseTensorMeta(DataType dtype, const DDim& dims);
+  DenseTensorMeta(DataType dtype, const DDim& dims, DataLayout layout);
+  DenseTensorMeta(DataType dtype,
const DDim& dims,
DataLayout layout,
const std::vector<std::vector<size_t>>& lod);
@@ -54,7 +54,7 @@ struct DenseTensorMeta {
/// marked with `const` are expected to remain unchanged.
bool is_scalar{false};
DDim dims;
-  DataType type{DataType::UNDEFINED};
+  DataType dtype{DataType::UNDEFINED};
DataLayout layout{DataLayout::NCHW};
LoD lod;
size_t offset{0};
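Note: combined with the updated tests further down, the renamed struct is used like this. A usage sketch; the include path and the glog-style CHECK macro (as in the tests) are assumptions, not taken from this diff:

    #include "paddle/pten/core/tensor_meta.h"  // path is an assumption

    void demo() {
      auto dims = paddle::framework::make_ddim({3, 4});
      pten::DenseTensorMeta meta(
          pten::DataType::FLOAT32, dims, pten::DataLayout::NCHW);
      // After this commit the member is `dtype`, not `type`.
      CHECK(meta.dtype == pten::DataType::FLOAT32);
      CHECK(meta.valid());
    }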
@@ -56,7 +56,7 @@ DenseTensorMeta DotInferShape(const DenseTensorMeta& x_meta,
y_dims.to_str()));
x_dims[x_dims.size() - 1] = 1;
-  DenseTensorMeta return_meta(x_meta.type, x_dims, x_meta.layout);
+  DenseTensorMeta return_meta(x_meta.dtype, x_dims, x_meta.layout);
return return_meta;
}
@@ -127,13 +127,13 @@ DenseTensorMeta MatmulInferShape(const DenseTensorMeta& x_meta,
auto ddim_out = paddle::framework::make_ddim(new_dims);
-  return {x_meta.type, ddim_out, x_meta.layout};
+  return {x_meta.dtype, ddim_out, x_meta.layout};
}
DenseTensorMeta ElementwiseInferShape(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta,
int axis) {
-  DenseTensorMeta return_meta(x_meta.type, x_meta.dims, x_meta.layout);
+  DenseTensorMeta return_meta(x_meta.dtype, x_meta.dims, x_meta.layout);
if (x_meta.dims != y_meta.dims) {
auto x_dims = x_meta.dims;
auto y_dims = y_meta.dims;
@@ -23,7 +23,7 @@ DenseTensorMeta UnchangedInferShape(const DenseTensorMeta& x_meta) {
DenseTensorMeta ReductionInferShape(const DenseTensorMeta& x_meta) {
const auto& out_dims = paddle::framework::make_ddim({1});
-  DenseTensorMeta return_meta(x_meta.type, out_dims, x_meta.layout);
+  DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout);
return return_meta;
}
@@ -63,7 +63,7 @@ DenseTensorMeta FlattenInferShape(const DenseTensorMeta& x_meta,
out_shape.push_back(x_dims[i]);
}
const auto& out_dims = paddle::framework::make_ddim(out_shape);
-  DenseTensorMeta return_meta(x_meta.type, out_dims, x_meta.layout);
+  DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout);
if (x_dims[0] == return_meta.dims[0]) {
// Only pass LoD when the first dimension of output and Input(X)
@@ -77,7 +77,7 @@ DenseTensorMeta FlattenInferShape(const DenseTensorMeta& x_meta,
DenseTensorMeta FullLikeInferShape(const DenseTensorMeta& x_meta,
DataType dtype,
DataLayout layout) {
-  return {dtype == DataType::UNDEFINED ? x_meta.type : dtype,
+  return {dtype == DataType::UNDEFINED ? x_meta.dtype : dtype,
x_meta.dims,
layout == DataLayout::UNDEFINED ? x_meta.layout : layout};
}
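Note: the UNDEFINED checks above encode a fallback rule: a full_like request that leaves dtype or layout UNDEFINED inherits that field from x_meta. Sketched below with simplified stand-in types (MiniMeta and friends), not the PTen signatures:

    enum class DType { UNDEFINED, FLOAT32 };
    enum class Layout { UNDEFINED, NCHW };

    struct MiniMeta {
      DType dtype;
      Layout layout;
    };

    // UNDEFINED request fields inherit from the source meta.
    MiniMeta full_like_meta(const MiniMeta& x, DType dtype, Layout layout) {
      return {dtype == DType::UNDEFINED ? x.dtype : dtype,
              layout == Layout::UNDEFINED ? x.layout : layout};
    }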
@@ -211,7 +211,7 @@ DenseTensorMeta InferShapeFromVecValue(const DenseTensorMeta& x_meta,
"But received 'shape' is empty."));
auto x_dims = x_meta.dims;
auto out_dims = ValidateShape(shape, x_dims);
-  DenseTensorMeta return_meta(x_meta.type, out_dims, x_meta.layout);
+  DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout);
if (x_dims[0] == return_meta.dims[0]) {
// Only pass LoD when the first dimension of output and Input(X)
// are the same.
@@ -31,32 +31,32 @@ TEST(dense_tensor, meta) {
CHECK(!meta_0.valid());
DenseTensorMeta meta_1(dtype, dims);
-  CHECK(meta_1.type == dtype);
+  CHECK(meta_1.dtype == dtype);
CHECK(meta_1.dims == dims);
CHECK(meta_1.valid());
DenseTensorMeta meta_2(dtype, dims, layout);
-  CHECK(meta_2.type == dtype);
+  CHECK(meta_2.dtype == dtype);
CHECK(meta_2.dims == dims);
CHECK(meta_2.layout == layout);
CHECK(meta_2.valid());
DenseTensorMeta meta_3(dtype, dims, layout, lod);
-  CHECK(meta_3.type == dtype);
+  CHECK(meta_3.dtype == dtype);
CHECK(meta_3.dims == dims);
CHECK(meta_3.layout == layout);
CHECK(meta_3.lod == lod);
CHECK(meta_3.valid());
DenseTensorMeta meta_4(meta_3);
-  CHECK(meta_4.type == dtype);
+  CHECK(meta_4.dtype == dtype);
CHECK(meta_4.dims == dims);
CHECK(meta_4.layout == layout);
CHECK(meta_4.lod == lod);
CHECK(meta_4.valid());
DenseTensorMeta meta_5(std::move(meta_4));
-  CHECK(meta_5.type == dtype);
+  CHECK(meta_5.dtype == dtype);
CHECK(meta_5.dims == dims);
CHECK(meta_5.layout == layout);
CHECK(meta_5.lod == lod);
@@ -82,7 +82,7 @@ TEST(dense_tensor, ctor) {
bool r{true};
r = r && (t.numel() == product(m.dims));
r = r && (t.dims() == m.dims);
-  r = r && (t.dtype() == m.type);
+  r = r && (t.dtype() == m.dtype);
r = r && (t.layout() == m.layout);
r = r && (t.place() == paddle::platform::CPUPlace());
r = r && t.initialized();
@@ -62,7 +62,7 @@ TEST(DEV_API, dot) {
// 3. check result
ASSERT_EQ(out.dims().size(), 2);
ASSERT_EQ(out.dims()[0], 3);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
auto expect_result = sum;
@@ -65,7 +65,7 @@ TEST(DEV_API, elementwise_add) {
// 3. check result
ASSERT_EQ(dense_out.dims().size(), 2);
ASSERT_EQ(dense_out.dims()[0], 3);
-  ASSERT_EQ(dense_out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(dense_out.meta().dtype, pten::DataType::FLOAT32);
ASSERT_EQ(dense_out.meta().layout, pten::DataLayout::NCHW);
auto expect_result = sum;
@@ -50,7 +50,7 @@ TEST(DEV_API, fill_any_like) {
ASSERT_EQ(out.dims().size(), 2);
ASSERT_EQ(out.dims()[0], 3);
ASSERT_EQ(out.numel(), 6);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
auto* actual_result = out.data<float>();
@@ -56,7 +56,7 @@ TEST(DEV_API, flatten) {
ASSERT_EQ(out.dims()[1], expect_shape[1]);
ASSERT_EQ(out.dims()[2], expect_shape[2]);
ASSERT_EQ(out.numel(), 36);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
bool value_equal = true;
@@ -49,7 +49,7 @@ TEST(DEV_API, mean) {
// 3. check result
ASSERT_EQ(out.dims().size(), 1);
ASSERT_EQ(out.numel(), 1);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
auto expect_result = sum / 12;
@@ -54,7 +54,7 @@ TEST(DEV_API, reshape) {
ASSERT_EQ(out.dims()[0], expect_shape[0]);
ASSERT_EQ(out.dims()[1], expect_shape[1]);
ASSERT_EQ(out.numel(), 36);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
bool value_equal = true;
@@ -56,7 +56,7 @@ TEST(DEV_API, scale) {
// 3. check result
ASSERT_EQ(out.dims().size(), 2);
ASSERT_EQ(out.numel(), 12);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
auto expect_result = 23;
@@ -101,7 +101,7 @@ TEST(DEV_API, scale_host) {
// 3. check result
ASSERT_EQ(out.dims().size(), 2);
ASSERT_EQ(out.numel(), 12);
-  ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
+  ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
auto expect_result = 23;