Unverified commit 1e9b3a3d authored by zyfncg, committed by GitHub

rename TensorBase interface data_type() to dtype() (#37257)

Parent 0daa69d4
@@ -109,9 +109,9 @@ void Tensor::reshape(const std::vector<int64_t> &shape) {
       "and it will be implemented by calling the reshape kernel later."));
 }
-DataType Tensor::dtype() const { return impl_->data_type(); }
+DataType Tensor::dtype() const { return impl_->dtype(); }
-DataType Tensor::type() const { return impl_->data_type(); }
+DataType Tensor::type() const { return impl_->dtype(); }
 DataLayout Tensor::layout() const { return impl_->layout(); }
...
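Note: for callers of the public API the rename is invisible; both accessors return the same DataType, with type() kept as the legacy spelling. A minimal caller sketch (the include path and the agreement of the two accessors are assumptions drawn from this hunk, not verified elsewhere):

// Sketch only: both public accessors forward to the impl's renamed dtype().
#include <cassert>
#include "paddle/pten/api/include/tensor.h"  // header path assumed for this era of the codebase

void check_accessors(const paddle::experimental::Tensor& t) {
  // dtype() is the primary accessor; type() returns the same value.
  assert(t.dtype() == t.type());
}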
@@ -125,7 +125,7 @@ void MovesStorage(pten::DenseTensor* src, paddle::framework::Tensor* dst) {
   auto storage = src->release();
   std::shared_ptr<paddle::memory::allocation::Allocation> holder(
       new TensorStorage(std::move(storage)));
-  dst->ResetHolderWithType(holder, pten::TransToProtoVarType(src->data_type()));
+  dst->ResetHolderWithType(holder, pten::TransToProtoVarType(src->dtype()));
 }
 void MovesStorage(pten::DenseTensor* src, paddle::framework::LoDTensor* dst) {
...
@@ -74,7 +74,7 @@ class CompatibleDenseTensorUtils {
       ret.meta_.dims[0] = end_idx - begin_idx;
       ret.meta_.offset = tensor->meta_.offset +
                          begin_idx * (tensor->numel() / tensor->dims()[0]) *
-                             paddle::experimental::SizeOf(tensor->data_type());
+                             paddle::experimental::SizeOf(tensor->dtype());
     }
     return ret;
   }
...
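Note: the offset expression above converts a slice along dimension 0 into a byte offset: begin_idx rows, each holding numel()/dims()[0] elements of SizeOf(dtype()) bytes. A self-contained worked analog with illustrative values (not Paddle code):

#include <cassert>
#include <cstdint>

int main() {
  // Analog of the Slice offset for a float32 tensor of shape {4, 3},
  // sliced starting at row begin_idx = 1.
  const std::int64_t numel = 4 * 3;
  const std::int64_t dim0 = 4;
  const std::int64_t begin_idx = 1;
  const std::size_t elem_bytes = sizeof(float);  // stand-in for SizeOf(dtype())
  const std::size_t offset = begin_idx * (numel / dim0) * elem_bytes;
  assert(offset == 12);  // skip one row of 3 floats: 3 * 4 bytes
  return 0;
}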
@@ -24,14 +24,12 @@ namespace pten {
 DenseTensor::DenseTensor(const std::shared_ptr<Allocator>& a,
                          const DenseTensorMeta& meta)
     : meta_(meta),
-      storage_(
-          make_intrusive<TensorStorage>(a, SizeOf(data_type()) * numel())) {}
+      storage_(make_intrusive<TensorStorage>(a, SizeOf(dtype()) * numel())) {}
 DenseTensor::DenseTensor(const std::shared_ptr<Allocator>& a,
                          DenseTensorMeta&& meta)
     : meta_(std::move(meta)),
-      storage_(
-          make_intrusive<TensorStorage>(a, SizeOf(data_type()) * numel())) {}
+      storage_(make_intrusive<TensorStorage>(a, SizeOf(dtype()) * numel())) {}
 DenseTensor::DenseTensor(intrusive_ptr<Storage> storage,
                          const DenseTensorMeta& meta)
@@ -60,7 +58,7 @@ void* DenseTensor::mutable_data(size_t request_bytes) {
       storage_,
       paddle::platform::errors::PreconditionNotMet(
           "The storage must be valid when call the mutable data function."));
-  size_t bytes = numel() * SizeOf(data_type());
+  size_t bytes = numel() * SizeOf(dtype());
   if (request_bytes) {
     PADDLE_ENFORCE_GE(request_bytes,
                       bytes,
@@ -87,19 +85,19 @@ T* DenseTensor::mutable_data() {
         paddle::experimental::CppTypeToDataType<T>::Type();
   }
   PADDLE_ENFORCE(
-      (data_type() == paddle::experimental::CppTypeToDataType<T>::Type()),
+      (dtype() == paddle::experimental::CppTypeToDataType<T>::Type()),
       paddle::platform::errors::InvalidArgument(
           "The type of data (%d) we are trying to retrieve does not match the "
           "type of data currently contained in the container (%d).",
          static_cast<int>(paddle::experimental::CppTypeToDataType<T>::Type()),
-          static_cast<int>(data_type())));
+          static_cast<int>(dtype())));
   return static_cast<T*>(mutable_data());
 }

 template <typename T>
 const T* DenseTensor::data() const {
   PADDLE_ENFORCE(
-      (data_type() == paddle::experimental::CppTypeToDataType<T>::Type()),
+      (dtype() == paddle::experimental::CppTypeToDataType<T>::Type()),
       paddle::platform::errors::InvalidArgument(
           "The type of data we are trying to retrieve does not match the "
           "type of data currently contained in the container."));
@@ -115,7 +113,7 @@ const void* DenseTensor::data() const {
 }
 void DenseTensor::check_memory_size() const {
-  size_t bytes = numel() * SizeOf(data_type());
+  size_t bytes = numel() * SizeOf(dtype());
   PADDLE_ENFORCE_GE(memory_size(),
                     bytes,
                     paddle::platform::errors::InvalidArgument(
...
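Note: the constructors and check_memory_size derive the buffer size the same way, element count times per-element width. A standalone sketch of that arithmetic (illustrative values, not Paddle code):

#include <cassert>
#include <cstdint>

int main() {
  // Analog of `numel() * SizeOf(dtype())` for a float32 tensor of shape {2, 3}.
  const std::int64_t numel = 2 * 3;
  const std::size_t bytes = numel * sizeof(float);  // 6 elements * 4 bytes
  assert(bytes == 24);
  return 0;
}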
@@ -93,7 +93,7 @@ class DenseTensor : public TensorBase,
   /// \brief Returns the data type of the tensor.
   /// \return The data type of the tensor.
-  DataType data_type() const noexcept override { return meta_.type; }
+  DataType dtype() const noexcept override { return meta_.type; }
   /// \brief Returns the data layout of the tensor.
   /// \return The data layout of the tensor.
...
@@ -43,7 +43,7 @@ class TensorBase {
   /// \brief Returns the data type of the tensor.
   /// \return The data type of the tensor.
-  virtual DataType data_type() const = 0;
+  virtual DataType dtype() const = 0;
   /// \brief Returns the data layout of the tensor.
   /// \return The data layout of the tensor.
...
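Note: because dtype() replaces data_type() at a pure-virtual boundary, every subclass override must be renamed in the same commit, and `override` turns any missed rename into a compile error. A self-contained analog of that mechanism (not Paddle code):

#include <cstdio>

enum class DataType { FLOAT32, INT32 };

// Analog of TensorBase after this PR: the accessor is now spelled dtype().
struct TensorBaseLike {
  virtual ~TensorBaseLike() = default;
  virtual DataType dtype() const = 0;
};

// Analog of DenseTensor: the override must use the new name. A stale
// `DataType data_type() const override { ... }` would fail to compile,
// because no base method with that name remains.
struct DenseTensorLike : TensorBaseLike {
  DataType dtype() const noexcept override { return DataType::FLOAT32; }
};

int main() {
  DenseTensorLike t;
  std::printf("%d\n", static_cast<int>(t.dtype()));  // prints 0 (FLOAT32)
  return 0;
}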
@@ -38,8 +38,8 @@ void Copy(const CPUContext& dev_ctx, const DenseTensor& src, DenseTensor* dst) {
   VLOG(4) << "src:" << src_ptr << ", dst:" << dst_ptr;
   CHECK(dst->layout() == src.layout());
-  auto size = src.numel() * paddle::framework::SizeOfType(
-                                TransToProtoVarType(src.data_type()));
+  auto size = src.numel() *
+              paddle::framework::SizeOfType(TransToProtoVarType(src.dtype()));
   if (paddle::platform::is_cpu_place(src_place) &&
       paddle::platform::is_cpu_place(dst_place)) {
...
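Note: in the Copy kernels the dtype only matters for computing a byte count; the transfer itself is untyped. A device-agnostic analog of that pattern using memcpy (illustrative, not the kernel itself):

#include <cassert>
#include <cstring>
#include <vector>

int main() {
  std::vector<float> src = {1.0f, 2.0f, 3.0f};
  std::vector<float> dst(src.size());
  // Analog of `src.numel() * SizeOfType(TransToProtoVarType(src.dtype()))`.
  const std::size_t size = src.size() * sizeof(float);
  std::memcpy(dst.data(), src.data(), size);  // the real kernels dispatch to memory::Copy
  assert(dst[2] == 3.0f);
  return 0;
}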
@@ -81,7 +81,7 @@ void Mean(const CUDAContext& dev_ctx, const DenseTensor& x, DenseTensor* out) {
                  dev_ctx.GetPlace());
   pten::DenseTensor tmp(
       alloc,
-      DenseTensorMeta(x.data_type(),
+      DenseTensorMeta(x.dtype(),
                       paddle::framework::make_ddim(
                           {static_cast<int64_t>(temp_storage_bytes)}),
                       x.layout()));
...
@@ -48,8 +48,8 @@ void Copy(const CUDAContext& dev_ctx,
   VLOG(4) << "src:" << src_ptr << ", dst:" << dst_ptr;
   CHECK(dst->layout() == src.layout());
-  auto size = src.numel() * paddle::framework::SizeOfType(
-                                TransToProtoVarType(src.data_type()));
+  auto size = src.numel() *
+              paddle::framework::SizeOfType(TransToProtoVarType(src.dtype()));
   if (paddle::platform::is_cuda_pinned_place(src_place) &&  // NOLINT
       paddle::platform::is_cuda_pinned_place(dst_place)) {
...
@@ -38,8 +38,8 @@ void Copy(const XPUDeviceContext& dev_ctx,
           << dst_place;
   dst->Resize(src.dims());
   CHECK(dst->layout() == src.layout());
-  auto size = src.numel() * paddle::framework::SizeOfType(
-                                TransToProtoVarType(src.data_type()));
+  auto size = src.numel() *
+              paddle::framework::SizeOfType(TransToProtoVarType(src.dtype()));
   if (paddle::platform::is_xpu_place(src_place) &&  // NOLINT
       paddle::platform::is_cpu_place(dst_place)) {
...
@@ -47,8 +47,7 @@ TEST(tensor_utils, dense_tensor_to_lod_tensor) {
   CHECK(dense_tensor.lod().size() == lod_tensor.lod().size());
   CHECK(dense_tensor.lod()[0] ==
         static_cast<std::vector<size_t>>((lod_tensor.lod()[0])));
-  CHECK(dense_tensor.data_type() ==
-        pten::TransToPtenDataType(lod_tensor.type()));
+  CHECK(dense_tensor.dtype() == pten::TransToPtenDataType(lod_tensor.type()));
   CHECK(dense_tensor.layout() ==
         pten::TransToPtenDataLayout(lod_tensor.layout()));
   CHECK(platform::is_cpu_place(lod_tensor.place()));
@@ -58,7 +57,7 @@ TEST(tensor_utils, dense_tensor_to_lod_tensor) {
   auto dense_tensor_1 = MakePtenDenseTensor(lod_tensor);
   CHECK(dense_tensor_1->dims() == dims);
-  CHECK(dense_tensor_1->data_type() == dtype);
+  CHECK(dense_tensor_1->dtype() == dtype);
   CHECK(dense_tensor_1->layout() == layout);
   CHECK(dense_tensor_1->lod().size() == lod.size());
   CHECK(dense_tensor_1->lod()[0] == lod[0]);
@@ -83,7 +82,7 @@ TEST(tensor_utils, dense_tensor_to_tensor) {
   framework::Tensor tensor;
   MovesStorage(&dense_tensor, &tensor);
-  CHECK(dense_tensor.data_type() == pten::TransToPtenDataType(tensor.type()));
+  CHECK(dense_tensor.dtype() == pten::TransToPtenDataType(tensor.type()));
   CHECK(dense_tensor.layout() == pten::TransToPtenDataLayout(tensor.layout()));
   CHECK(platform::is_cpu_place(tensor.place()));
@@ -92,7 +91,7 @@ TEST(tensor_utils, dense_tensor_to_tensor) {
   auto dense_tensor_1 = MakePtenDenseTensor(tensor);
   CHECK(dense_tensor_1->dims() == dims);
-  CHECK(dense_tensor_1->data_type() == dtype);
+  CHECK(dense_tensor_1->dtype() == dtype);
   CHECK(dense_tensor_1->layout() == layout);
   const float* data_1 = dense_tensor_1->data<float>();
   CHECK(data_1[0] == 1.0f);
@@ -117,7 +116,7 @@ TEST(PtenUtils, VarToPtTensor) {
   // 2. test API
   auto tensor_x = MakePtenTensorBaseFromVar(v, tensor_def);
   // 3. check result
-  ASSERT_EQ(tensor_x->data_type(), pten::DataType::INT32);
+  ASSERT_EQ(tensor_x->dtype(), pten::DataType::INT32);
 }
 }  // namespace tests
...
@@ -82,7 +82,7 @@ TEST(dense_tensor, ctor) {
   bool r{true};
   r = r && (t.numel() == product(m.dims));
   r = r && (t.dims() == m.dims);
-  r = r && (t.data_type() == m.type);
+  r = r && (t.dtype() == m.type);
   r = r && (t.layout() == m.layout);
   r = r && (t.place() == paddle::platform::CPUPlace());
   r = r && t.initialized();
...