From 8363406a015995796e0ccea2eefd5f4be3f00b3a Mon Sep 17 00:00:00 2001
From: chentianyu03
Date: Fri, 18 Feb 2022 10:52:24 +0800
Subject: [PATCH] [pten]add T, remove default value of DataType in DeviceContext::Alloc (#39620)

* add T to Alloc and remove default value of DataType in DeviceContext::Alloc

* add dtype
---
 paddle/pten/core/device_context.h       | 6 ++----
 paddle/pten/kernels/cpu/copy_kernel.cc  | 2 +-
 paddle/pten/kernels/cpu/split_kernel.cc | 2 +-
 paddle/pten/kernels/gpu/split_kernel.cu | 2 +-
 paddle/pten/kernels/reshape_kernel.cc   | 4 ++--
 paddle/pten/kernels/xpu/copy_kernel.cc  | 2 +-
 6 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/paddle/pten/core/device_context.h b/paddle/pten/core/device_context.h
index 05753b531ff..08f277732dc 100644
--- a/paddle/pten/core/device_context.h
+++ b/paddle/pten/core/device_context.h
@@ -99,9 +99,7 @@ class DeviceContext {
   /**
    * @brief Allocate device memory for tensor.
    */
-  void* Alloc(TensorBase*,
-              DataType dtype = DataType::UNDEFINED,
-              size_t requested_size = 0) const;
+  void* Alloc(TensorBase*, DataType dtype, size_t requested_size = 0) const;
 
   template <typename T>
   T* Alloc(TensorBase* tensor, size_t requested_size = 0) const;
@@ -110,7 +108,7 @@ class DeviceContext {
    * @brief Allocate host memory for tensor.
    */
   void* HostAlloc(TensorBase* tensor,
-                  DataType dtype = DataType::UNDEFINED,
+                  DataType dtype,
                   size_t requested_size = 0) const;
 
   template <typename T>
diff --git a/paddle/pten/kernels/cpu/copy_kernel.cc b/paddle/pten/kernels/cpu/copy_kernel.cc
index 8df53c064a1..c70b4154a75 100644
--- a/paddle/pten/kernels/cpu/copy_kernel.cc
+++ b/paddle/pten/kernels/cpu/copy_kernel.cc
@@ -37,7 +37,7 @@ void Copy(const Context& dev_ctx,
           << src_place;
 
   dst->Resize(src.dims());
-  auto* dst_ptr = dev_ctx.Alloc(dst);
+  auto* dst_ptr = dev_ctx.Alloc(dst, src.dtype());
 
   if (src_ptr == dst_ptr) {
     VLOG(3) << "Skip copy the same data async from " << src_place << " to "
diff --git a/paddle/pten/kernels/cpu/split_kernel.cc b/paddle/pten/kernels/cpu/split_kernel.cc
index 78fcdcb155c..450f6ff8774 100644
--- a/paddle/pten/kernels/cpu/split_kernel.cc
+++ b/paddle/pten/kernels/cpu/split_kernel.cc
@@ -44,7 +44,7 @@ void SplitKernel(const Context& dev_ctx,
 
   std::vector<const DenseTensor*> shape_refer;
   for (size_t j = 0; j < outs.size(); ++j) {
-    dev_ctx.Alloc(outs[j]);
+    dev_ctx.template Alloc<T>(outs[j]);
     shape_refer.emplace_back(outs[j]);
   }
 
diff --git a/paddle/pten/kernels/gpu/split_kernel.cu b/paddle/pten/kernels/gpu/split_kernel.cu
index 46d18b75b61..747acfcfefc 100644
--- a/paddle/pten/kernels/gpu/split_kernel.cu
+++ b/paddle/pten/kernels/gpu/split_kernel.cu
@@ -43,7 +43,7 @@ void SplitKernel(const Context& dev_ctx,
 
   std::vector<const DenseTensor*> shape_refer;
   for (size_t j = 0; j < outs.size(); ++j) {
-    dev_ctx.Alloc(outs[j]);
+    dev_ctx.template Alloc<T>(outs[j]);
     shape_refer.emplace_back(outs[j]);
   }
 
diff --git a/paddle/pten/kernels/reshape_kernel.cc b/paddle/pten/kernels/reshape_kernel.cc
index c52d251582b..a11442ce166 100644
--- a/paddle/pten/kernels/reshape_kernel.cc
+++ b/paddle/pten/kernels/reshape_kernel.cc
@@ -29,10 +29,10 @@ void ReshapeKernel(const Context& dev_ctx,
   MetaTensor meta_out(out);
   InferMetaFromVecValue(x, shape.GetData(), &meta_out);
   if (x.initialized() && x.Holder() == out->Holder()) {
-    dev_ctx.Alloc(out);
+    dev_ctx.Alloc(out, x.dtype());
     return;
   }
-  dev_ctx.Alloc(out);
+  dev_ctx.Alloc(out, x.dtype());
   // TODO(chenweihang): the output dims are overwrite after copying,
   // here we need to use copy method that only copy data
   auto dims = out->dims();
diff --git a/paddle/pten/kernels/xpu/copy_kernel.cc b/paddle/pten/kernels/xpu/copy_kernel.cc
index fa0331f24dd..af49b2eb6f0 100644
--- a/paddle/pten/kernels/xpu/copy_kernel.cc
+++ b/paddle/pten/kernels/xpu/copy_kernel.cc
@@ -30,7 +30,7 @@ void Copy(const Context& dev_ctx,
           bool blocking,
           DenseTensor* dst) {
   auto* src_ptr = src.data();
-  auto* dst_ptr = dev_ctx.Alloc(dst);
+  auto* dst_ptr = dev_ctx.Alloc(dst, src.dtype());
   const auto& src_place = src.place();
   const auto& dst_place = dst->place();
 
--
GitLab
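
Note: the sketch below is not part of the patch. It is a minimal, hypothetical call site illustrating the two allocation paths after this change, assuming the pten DeviceContext interface shown in the hunks above; SomeKernel, x, and out are placeholder names.

// Hypothetical call site, assuming the pten DeviceContext API above.
template <typename T, typename Context>
void SomeKernel(const Context& dev_ctx,
                const pten::DenseTensor& x,
                pten::DenseTensor* out) {
  // Element type known at compile time: use the templated overload.
  T* typed_ptr = dev_ctx.template Alloc<T>(out);

  // Type-erased path: the dtype must now be passed explicitly,
  // since the DataType::UNDEFINED default was removed.
  void* raw_ptr = dev_ctx.Alloc(out, x.dtype());

  (void)typed_ptr;
  (void)raw_ptr;
}

Kernels such as Copy, which do not know the element type statically, follow the second path and forward src.dtype(); kernels instantiated per element type, such as SplitKernel, use the templated overload.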