From 66a28e13b1afa5408d66f70cb55d7013ffaaee7f Mon Sep 17 00:00:00 2001 From: Ruibiao Chen Date: Thu, 23 Jun 2022 19:28:00 +0800 Subject: [PATCH] Remove unnecessary includings for pstring.h (#43752) * Remove unnecessary including for pstring.h * Fix typos --- paddle/fluid/framework/convert_utils.cc | 2 + paddle/fluid/pybind/tensor_py.h | 245 ++++++++++++------ paddle/phi/common/data_type.h | 7 +- paddle/phi/core/string_tensor.cc | 1 + paddle/phi/core/string_tensor.h | 5 +- .../strings/cpu/strings_copy_kernel.cc | 1 + .../tests/api/test_strings_lower_upper_api.cc | 1 + 7 files changed, 174 insertions(+), 88 deletions(-) diff --git a/paddle/fluid/framework/convert_utils.cc b/paddle/fluid/framework/convert_utils.cc index 1ea278ea4f7..112894c2069 100644 --- a/paddle/fluid/framework/convert_utils.cc +++ b/paddle/fluid/framework/convert_utils.cc @@ -11,10 +11,12 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + #include "paddle/fluid/framework/convert_utils.h" // See Note [ Why still include the fluid headers? ] #include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/phi/common/pstring.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index ed7ce64032b..bba8526abd7 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -38,6 +38,7 @@ limitations under the License. */ #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/profiler/event_tracing.h" +#include "paddle/phi/common/pstring.h" #include "paddle/phi/core/string_tensor.h" #include "paddle/phi/kernels/strings/unicode.h" #include "pybind11/numpy.h" @@ -180,14 +181,17 @@ template class PYBIND11_HIDDEN NumpyAllocation : public memory::Allocation { public: explicit NumpyAllocation(const py::array &arr) - : Allocation(const_cast(arr.data()), sizeof(T) * (arr.size()), + : Allocation(const_cast(arr.data()), + sizeof(T) * (arr.size()), paddle::platform::CPUPlace()), arr_(arr.ptr()) { - PADDLE_ENFORCE_NOT_NULL(arr_, platform::errors::InvalidArgument( - "The underlying PyObject pointer of " - "numpy array cannot be nullptr")); + PADDLE_ENFORCE_NOT_NULL( + arr_, + platform::errors::InvalidArgument("The underlying PyObject pointer of " + "numpy array cannot be nullptr")); PADDLE_ENFORCE_NE( - arr_, Py_None, + arr_, + Py_None, platform::errors::PreconditionNotMet( "The underlying PyObject pointer of numpy array cannot be None")); Py_INCREF(arr_); @@ -241,7 +245,8 @@ inline std::string TensorDTypeToPyDTypeStr( } else { \ constexpr auto kIsValidDType = ValidDTypeToPyArrayChecker::kValue; \ PADDLE_ENFORCE_EQ( \ - kIsValidDType, true, \ + kIsValidDType, \ + true, \ platform::errors::Unimplemented( \ "This type [%s] of tensor cannot be expose to Python", \ typeid(T).name())); \ @@ -259,7 +264,8 @@ inline std::string TensorDTypeToPyDTypeStr( template T TensorGetElement(const framework::Tensor &self, size_t offset) { - PADDLE_ENFORCE_LT(offset, self.numel(), + PADDLE_ENFORCE_LT(offset, + self.numel(), platform::errors::InvalidArgument( "The offset exceeds the size of tensor.")); @@ -276,29 +282,29 @@ T TensorGetElement(const framework::Tensor &self, size_t offset) { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) const T *a = self.data(); auto p = self.place(); - 
paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T), - nullptr); + paddle::memory::Copy( + platform::CPUPlace(), &b, p, a + offset, sizeof(T), nullptr); #endif } else if (platform::is_mlu_place(self.place())) { #ifdef PADDLE_WITH_MLU const T *a = self.data(); auto p = self.place(); - paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T), - nullptr); + paddle::memory::Copy( + platform::CPUPlace(), &b, p, a + offset, sizeof(T), nullptr); #endif } else if (platform::is_npu_place(self.place())) { #if defined(PADDLE_WITH_ASCEND_CL) const T *a = self.data(); auto p = self.place(); - paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T), - nullptr); + paddle::memory::Copy( + platform::CPUPlace(), &b, p, a + offset, sizeof(T), nullptr); #endif } else if (platform::is_custom_place(self.place())) { #if defined(PADDLE_WITH_CUSTOM_DEVICE) const T *a = self.data(); auto p = self.place(); - paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T), - nullptr); + paddle::memory::Copy( + platform::CPUPlace(), &b, p, a + offset, sizeof(T), nullptr); #endif } VLOG(10) << "TensorGetElement, place: " << self.place() @@ -308,7 +314,8 @@ T TensorGetElement(const framework::Tensor &self, size_t offset) { template void TensorSetElement(framework::Tensor *self, size_t offset, T elem) { - PADDLE_ENFORCE_LT(offset, self->numel(), + PADDLE_ENFORCE_LT(offset, + self->numel(), platform::errors::InvalidArgument( "The offset exceeds the size of tensor.")); VLOG(10) << "TensorSetElement, place: " << self->place() @@ -325,29 +332,29 @@ void TensorSetElement(framework::Tensor *self, size_t offset, T elem) { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) auto p = self->place(); T *a = self->mutable_data(p); - paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T), - nullptr); + paddle::memory::Copy( + p, a + offset, platform::CPUPlace(), &elem, sizeof(T), nullptr); #endif } else if (platform::is_mlu_place(self->place())) { #ifdef PADDLE_WITH_MLU auto p = self->place(); T *a = self->mutable_data(p); - paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T), - nullptr); + paddle::memory::Copy( + p, a + offset, platform::CPUPlace(), &elem, sizeof(T), nullptr); #endif } else if (platform::is_npu_place(self->place())) { #if defined(PADDLE_WITH_ASCEND_CL) auto p = self->place(); T *a = self->mutable_data(p); - paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T), - nullptr); + paddle::memory::Copy( + p, a + offset, platform::CPUPlace(), &elem, sizeof(T), nullptr); #endif } else if (platform::is_custom_place(self->place())) { #if defined(PADDLE_WITH_CUSTOM_DEVICE) auto p = self->place(); T *a = self->mutable_data(p); - paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T), - nullptr); + paddle::memory::Copy( + p, a + offset, platform::CPUPlace(), &elem, sizeof(T), nullptr); #endif } } @@ -356,7 +363,8 @@ template void SetTensorFromPyArrayT( framework::Tensor *self, const py::array_t &array, - const P &place, bool zero_copy) { + const P &place, + bool zero_copy) { std::vector dims; dims.reserve(array.ndim()); for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) { @@ -380,8 +388,11 @@ void SetTensorFromPyArrayT( platform::Place tmp_place = place; platform::XPUDeviceGuard guard(tmp_place.device); auto dst = self->mutable_data(place); - memory::Copy(tmp_place, static_cast(dst), platform::CPUPlace(), - static_cast(array.data()), array.nbytes()); + memory::Copy(tmp_place, 
+ static_cast(dst), + platform::CPUPlace(), + static_cast(array.data()), + array.nbytes()); #else PADDLE_THROW(platform::errors::PermissionDenied( "Cannot use XPUPlace in CPU/GPU version, " @@ -413,8 +424,8 @@ void SetTensorFromPyArrayT( platform::Place tmp_place = place; platform::NPUDeviceGuard guard(tmp_place.device); auto dst = self->mutable_data(place); - platform::NPUMemcpySync(dst, array.data(), array.nbytes(), - ACL_MEMCPY_HOST_TO_DEVICE); + platform::NPUMemcpySync( + dst, array.data(), array.nbytes(), ACL_MEMCPY_HOST_TO_DEVICE); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &ctx = *pool.Get(place); ctx.Wait(); @@ -460,11 +471,11 @@ void SetTensorFromPyArrayT( platform::CUDADeviceGuard guard(place.device); auto dst = self->mutable_data(place); #ifdef PADDLE_WITH_HIP - paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(), - hipMemcpyHostToDevice); + paddle::platform::GpuMemcpySync( + dst, array.data(), array.nbytes(), hipMemcpyHostToDevice); #else - paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(), - cudaMemcpyHostToDevice); + paddle::platform::GpuMemcpySync( + dst, array.data(), array.nbytes(), cudaMemcpyHostToDevice); #endif } else if (paddle::platform::is_cuda_pinned_place(place)) { @@ -486,8 +497,10 @@ void SetTensorFromPyArrayT( } template -void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj, - const P &place, bool zero_copy) { +void SetTensorFromPyArray(framework::Tensor *self, + const py::object &obj, + const P &place, + bool zero_copy) { auto array = obj.cast(); if (py::isinstance>(array)) { SetTensorFromPyArrayT(self, array, place, zero_copy); @@ -504,8 +517,8 @@ void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj, } else if (py::isinstance>(array)) { SetTensorFromPyArrayT(self, array, place, zero_copy); } else if (py::isinstance>(array)) { - SetTensorFromPyArrayT(self, array, place, - zero_copy); + SetTensorFromPyArrayT( + self, array, place, zero_copy); } else if (py::isinstance>>( array)) { SetTensorFromPyArrayT, P>( @@ -517,8 +530,8 @@ void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj, } else if (py::isinstance>(array)) { // since there is still no support for bfloat16 in NumPy, // uint16 is used for casting bfloat16 - SetTensorFromPyArrayT(self, array, place, - zero_copy); + SetTensorFromPyArrayT( + self, array, place, zero_copy); } else if (py::isinstance>(array)) { SetTensorFromPyArrayT(self, array, place, zero_copy); } else { @@ -533,11 +546,13 @@ void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj, } template -void SetStringTensorFromPyArray(phi::StringTensor *self, const py::array &array, +void SetStringTensorFromPyArray(phi::StringTensor *self, + const py::array &array, const P &place) { bool is_string_pyarray = array.dtype().kind() == 'S' || array.dtype().kind() == 'U'; - PADDLE_ENFORCE_EQ(is_string_pyarray, true, + PADDLE_ENFORCE_EQ(is_string_pyarray, + true, platform::errors::InvalidArgument( "Expect the dtype of numpy array is string or " "unicode, but recevie dtype %s", @@ -575,7 +590,8 @@ void SetStringTensorFromPyArray(phi::StringTensor *self, const py::array &array, pstring pstr(utf8_len - 1, 0); phi::strings::GetUTF8Str( reinterpret_cast(array.data()) + unicode_len * i, - pstr.mdata(), unicode_len); + pstr.mdata(), + unicode_len); dst[i] = pstr; } } @@ -588,7 +604,8 @@ void SetStringTensorFromPyArray(phi::StringTensor *self, const py::array &array, template void 
SetUVATensorFromPyArrayImpl(framework::LoDTensor *self_tensor, - const py::array_t &array, int device_id) { + const py::array_t &array, + int device_id) { #if defined(PADDLE_WITH_CUDA) VLOG(4) << "Running in SetUVATensorFromPyArrayImpl."; std::vector dims; @@ -603,16 +620,19 @@ void SetUVATensorFromPyArrayImpl(framework::LoDTensor *self_tensor, auto data_type = framework::ToDataType(std::type_index(typeid(T))); const auto &need_allocate_size = numel * framework::SizeOfType(data_type); T *data_ptr; - cudaHostAlloc(reinterpret_cast(&data_ptr), need_allocate_size, + cudaHostAlloc(reinterpret_cast(&data_ptr), + need_allocate_size, cudaHostAllocWriteCombined | cudaHostAllocMapped); std::memcpy(data_ptr, array.data(), array.nbytes()); void *cuda_device_pointer = nullptr; cudaHostGetDevicePointer(reinterpret_cast(&cuda_device_pointer), - reinterpret_cast(data_ptr), 0); + reinterpret_cast(data_ptr), + 0); std::shared_ptr holder = std::make_shared( - cuda_device_pointer, need_allocate_size, + cuda_device_pointer, + need_allocate_size, platform::CUDAPlace(device_id)); self_tensor->ResetHolderWithType(holder, framework::TransToPhiDataType(data_type)); @@ -622,7 +642,8 @@ void SetUVATensorFromPyArrayImpl(framework::LoDTensor *self_tensor, template void SetUVATensorFromPyArray( const std::shared_ptr &self, - const py::array_t &array, int device_id) { + const py::array_t &array, + int device_id) { #if defined(PADDLE_WITH_CUDA) VLOG(4) << "Running in SetUVATensorFromPyArray for VarBase."; auto *self_tensor = self->MutableVar()->GetMutable(); @@ -633,7 +654,8 @@ void SetUVATensorFromPyArray( template void SetUVATensorFromPyArray( const std::shared_ptr &self, - const py::array_t &array, int device_id) { + const py::array_t &array, + int device_id) { #if defined(PADDLE_WITH_CUDA) VLOG(4) << "Running in SetUVATensorFromPyArray for Phi::Tensor."; phi::DenseTensorMeta meta = @@ -652,7 +674,8 @@ void SetUVATensorFromPyArray( } template -void _sliceCompute(const framework::Tensor *in, framework::Tensor *out, +void _sliceCompute(const framework::Tensor *in, + framework::Tensor *out, const platform::CPUDeviceContext &ctx, const std::vector &axes, const std::vector &starts) { @@ -688,15 +711,21 @@ void _sliceCompute(const framework::Tensor *in, framework::Tensor *out, template void _concatCompute(const std::vector &ins, paddle::framework::Tensor *out, - const platform::CPUDeviceContext &ctx, int64_t axis) { + const platform::CPUDeviceContext &ctx, + int64_t axis) { if (axis == 0 && ins.size() < 10) { size_t output_offset = 0; for (auto &in : ins) { auto in_stride = phi::stride_numel(in.dims()); auto out_stride = phi::stride_numel(out->dims()); paddle::operators::StridedNumelCopyWithAxis( - ctx, axis, out->data() + output_offset, out_stride, in.data(), - in_stride, in_stride[axis]); + ctx, + axis, + out->data() + output_offset, + out_stride, + in.data(), + in_stride, + in_stride[axis]); output_offset += in_stride[axis]; } } else { @@ -706,9 +735,13 @@ void _concatCompute(const std::vector &ins, } } -inline void _getSliceinfo(const framework::Tensor &self, py::object obj, - const int64_t dim, int64_t *pstart, int64_t *pstop, - int64_t *pstep, int64_t *pslicelength) { +inline void _getSliceinfo(const framework::Tensor &self, + py::object obj, + const int64_t dim, + int64_t *pstart, + int64_t *pstop, + int64_t *pstep, + int64_t *pslicelength) { auto &start = *pstart; auto &stop = *pstop; auto &step = *pstep; @@ -718,7 +751,8 @@ inline void _getSliceinfo(const framework::Tensor &self, py::object obj, 0 <= dim && dim < 
srcDDim.size(), platform::errors::OutOfRange("The dim %d of slice is out of bounds, it " "shound be in the range of [0, %d).", - dim, srcDDim.size())); + dim, + srcDDim.size())); if (py::isinstance(obj)) { size_t lstart, lstop, lstep, lslicelength; @@ -739,7 +773,9 @@ inline void _getSliceinfo(const framework::Tensor &self, py::object obj, std::abs(start) < srcDDim[dim], platform::errors::OutOfRange("The start %d of slice is out of bounds, " "it shound be in the range of (%d, %d).", - start, -srcDDim[dim], srcDDim[dim])); + start, + -srcDDim[dim], + srcDDim[dim])); start = (start >= 0) ? start : srcDDim[dim] - start; stop = start + 1; step = 1; @@ -779,9 +815,11 @@ inline framework::Tensor *_getTensor(const framework::Tensor &self, } template -void _sliceDapper(const framework::Tensor *in, framework::Tensor *out, +void _sliceDapper(const framework::Tensor *in, + framework::Tensor *out, const platform::CPUDeviceContext &ctx, - const std::vector &axes, const std::vector &starts, + const std::vector &axes, + const std::vector &starts, int size) { switch (size) { case 1: @@ -821,7 +859,9 @@ void _sliceDapper(const framework::Tensor *in, framework::Tensor *out, template inline framework::Tensor *_sliceWrapper(const framework::Tensor &self, const platform::CPUDeviceContext &ctx, - py::object obj, int dim, int64_t start, + py::object obj, + int dim, + int64_t start, int64_t slicelength) { framework::DDim dstDDim = self.dims(); dstDDim[dim] = static_cast(slicelength); @@ -834,7 +874,8 @@ inline framework::Tensor *_sliceWrapper(const framework::Tensor &self, template inline framework::Tensor *_sliceAndConcat(const framework::Tensor &self, - py::object obj, int dim) { + py::object obj, + int dim) { platform::CPUDeviceContext ctx; int64_t start, stop, step, slicelength; _getSliceinfo(self, obj, dim, &start, &stop, &step, &slicelength); @@ -856,7 +897,8 @@ inline framework::Tensor *_sliceAndConcat(const framework::Tensor &self, } inline framework::Tensor *_sliceTensor(const framework::Tensor &self, - py::object obj, int dim) { + py::object obj, + int dim) { auto src_type = framework::TransToProtoVarType(self.dtype()); switch (src_type) { case framework::proto::VarType::FP16: @@ -959,43 +1001,53 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor, !is_custom_device_tensor) { if (!need_deep_copy) { auto base = py::cast(std::move(tensor)); - return py::array(py::dtype(py_dtype_str.c_str()), py_dims, py_strides, - const_cast(tensor_buf_ptr), base); + return py::array(py::dtype(py_dtype_str.c_str()), + py_dims, + py_strides, + const_cast(tensor_buf_ptr), + base); } else { py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides); PADDLE_ENFORCE_EQ( - py_arr.writeable(), true, + py_arr.writeable(), + true, platform::errors::InvalidArgument( "PyArray is not writable, in which case memory leak " "or double free would occur")); PADDLE_ENFORCE_EQ( - py_arr.owndata(), true, + py_arr.owndata(), + true, platform::errors::InvalidArgument( "PyArray does not own data, in which case memory leak " "or double free would occur")); platform::CPUPlace place; size_t copy_bytes = sizeof_dtype * numel; - paddle::memory::Copy(place, py_arr.mutable_data(), place, tensor_buf_ptr, - copy_bytes); + paddle::memory::Copy( + place, py_arr.mutable_data(), place, tensor_buf_ptr, copy_bytes); return py_arr; } } else if (is_xpu_tensor) { #ifdef PADDLE_WITH_XPU py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides); - PADDLE_ENFORCE_EQ(py_arr.writeable(), true, + 
PADDLE_ENFORCE_EQ(py_arr.writeable(), + true, platform::errors::InvalidArgument( "PyArray is not writable, in which case memory leak " "or double free would occur")); PADDLE_ENFORCE_EQ( - py_arr.owndata(), true, + py_arr.owndata(), + true, platform::errors::InvalidArgument( "PyArray does not own data, in which case memory leak " "or double free would occur")); size_t copy_bytes = sizeof_dtype * numel; auto p = tensor.place(); - paddle::memory::Copy(platform::CPUPlace(), py_arr.mutable_data(), p, - tensor_buf_ptr, copy_bytes); + paddle::memory::Copy(platform::CPUPlace(), + py_arr.mutable_data(), + p, + tensor_buf_ptr, + copy_bytes); return py_arr; #else PADDLE_THROW(platform::errors::PermissionDenied( @@ -1005,20 +1057,26 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor, } else if (is_gpu_tensor) { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides); - PADDLE_ENFORCE_EQ(py_arr.writeable(), true, + PADDLE_ENFORCE_EQ(py_arr.writeable(), + true, platform::errors::InvalidArgument( "PyArray is not writable, in which case memory leak " "or double free would occur")); PADDLE_ENFORCE_EQ( - py_arr.owndata(), true, + py_arr.owndata(), + true, platform::errors::InvalidArgument( "PyArray does not own data, in which case memory leak " "or double free would occur")); size_t copy_bytes = sizeof_dtype * numel; auto p = tensor.place(); - paddle::memory::Copy(platform::CPUPlace(), py_arr.mutable_data(), p, - tensor_buf_ptr, copy_bytes, nullptr); + paddle::memory::Copy(platform::CPUPlace(), + py_arr.mutable_data(), + p, + tensor_buf_ptr, + copy_bytes, + nullptr); return py_arr; #else PADDLE_THROW(platform::errors::PermissionDenied( @@ -1028,12 +1086,14 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor, } else if (is_npu_tensor) { #ifdef PADDLE_WITH_ASCEND_CL py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides); - PADDLE_ENFORCE_EQ(py_arr.writeable(), true, + PADDLE_ENFORCE_EQ(py_arr.writeable(), + true, platform::errors::InvalidArgument( "PyArray is not writable, in which case memory leak " "or double free would occur")); PADDLE_ENFORCE_EQ( - py_arr.owndata(), true, + py_arr.owndata(), + true, platform::errors::InvalidArgument( "PyArray does not own data, in which case memory leak " "or double free would occur")); @@ -1043,7 +1103,10 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor, platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &ctx = *pool.Get(tensor.place()); paddle::memory::Copy( - platform::CPUPlace(), py_arr.mutable_data(), p, tensor_buf_ptr, + platform::CPUPlace(), + py_arr.mutable_data(), + p, + tensor_buf_ptr, copy_bytes, reinterpret_cast(ctx).stream()); ctx.Wait(); @@ -1056,12 +1119,14 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor, } else if (is_mlu_tensor) { #ifdef PADDLE_WITH_MLU py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides); - PADDLE_ENFORCE_EQ(py_arr.writeable(), true, + PADDLE_ENFORCE_EQ(py_arr.writeable(), + true, platform::errors::InvalidArgument( "PyArray is not writable, in which case memory leak " "or double free would occur")); PADDLE_ENFORCE_EQ( - py_arr.owndata(), true, + py_arr.owndata(), + true, platform::errors::InvalidArgument( "PyArray does not own data, in which case memory leak " "or double free would occur")); @@ -1071,7 +1136,10 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor, platform::DeviceContextPool &pool = 
platform::DeviceContextPool::Instance(); auto &ctx = *pool.Get(tensor.place()); paddle::memory::Copy( - platform::CPUPlace(), py_arr.mutable_data(), p, tensor_buf_ptr, + platform::CPUPlace(), + py_arr.mutable_data(), + p, + tensor_buf_ptr, copy_bytes, reinterpret_cast(ctx).stream()); ctx.Wait(); @@ -1084,12 +1152,14 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor, } else if (is_custom_device_tensor) { #ifdef PADDLE_WITH_CUSTOM_DEVICE py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides); - PADDLE_ENFORCE_EQ(py_arr.writeable(), true, + PADDLE_ENFORCE_EQ(py_arr.writeable(), + true, platform::errors::InvalidArgument( "PyArray is not writable, in which case memory leak " "or double free would occur")); PADDLE_ENFORCE_EQ( - py_arr.owndata(), true, + py_arr.owndata(), + true, platform::errors::InvalidArgument( "PyArray does not own data, in which case memory leak " "or double free would occur")); @@ -1098,8 +1168,11 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor, platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &ctx = *pool.Get(tensor.place()); paddle::memory::Copy( - platform::CPUPlace(), py_arr.mutable_data(), tensor.place(), - tensor_buf_ptr, copy_bytes, + platform::CPUPlace(), + py_arr.mutable_data(), + tensor.place(), + tensor_buf_ptr, + copy_bytes, reinterpret_cast(ctx).stream()); ctx.Wait(); return py_arr; diff --git a/paddle/phi/common/data_type.h b/paddle/phi/common/data_type.h index ef9b4250482..369d8ad49e1 100644 --- a/paddle/phi/common/data_type.h +++ b/paddle/phi/common/data_type.h @@ -18,7 +18,12 @@ limitations under the License. */ #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/complex.h" #include "paddle/phi/common/float16.h" -#include "paddle/phi/common/pstring.h" + +namespace phi { +namespace dtype { +class pstring; +} // namespace dtype +} // namespace phi namespace paddle { namespace experimental { diff --git a/paddle/phi/core/string_tensor.cc b/paddle/phi/core/string_tensor.cc index 20cbf3dffcb..f1f35364344 100644 --- a/paddle/phi/core/string_tensor.cc +++ b/paddle/phi/core/string_tensor.cc @@ -15,6 +15,7 @@ limitations under the License. */ #include "paddle/phi/core/string_tensor.h" #include "paddle/fluid/memory/malloc.h" +#include "paddle/phi/common/pstring.h" namespace phi { diff --git a/paddle/phi/core/string_tensor.h b/paddle/phi/core/string_tensor.h index 94c9974f4ad..0391099faab 100644 --- a/paddle/phi/core/string_tensor.h +++ b/paddle/phi/core/string_tensor.h @@ -14,13 +14,16 @@ limitations under the License. */ #pragma once -#include "paddle/phi/common/pstring.h" #include "paddle/phi/core/allocator.h" #include "paddle/phi/core/tensor_base.h" #include "paddle/phi/core/tensor_meta.h" namespace phi { +namespace dtype { +class pstring; +} // namespace dtype + /// \brief In Paddle 2.3, we add a new type of Tensor, StringTensor, /// which is designed for string data management. /// During the entire life cycle of a StringTensor, its device type and key diff --git a/paddle/phi/kernels/strings/cpu/strings_copy_kernel.cc b/paddle/phi/kernels/strings/cpu/strings_copy_kernel.cc index efd69c6e2f9..cd6e5fadd49 100644 --- a/paddle/phi/kernels/strings/cpu/strings_copy_kernel.cc +++ b/paddle/phi/kernels/strings/cpu/strings_copy_kernel.cc @@ -15,6 +15,7 @@ limitations under the License. 
*/ #include "paddle/phi/kernels/strings/strings_copy_kernel.h" #include "glog/logging.h" +#include "paddle/phi/common/pstring.h" #include "paddle/phi/core/kernel_registry.h" namespace phi { diff --git a/paddle/phi/tests/api/test_strings_lower_upper_api.cc b/paddle/phi/tests/api/test_strings_lower_upper_api.cc index c8abae1836f..66542f5110e 100644 --- a/paddle/phi/tests/api/test_strings_lower_upper_api.cc +++ b/paddle/phi/tests/api/test_strings_lower_upper_api.cc @@ -19,6 +19,7 @@ limitations under the License. */ #include "paddle/phi/api/include/strings_api.h" #include "paddle/phi/api/lib/utils/allocator.h" #include "paddle/phi/backends/all_context.h" +#include "paddle/phi/common/pstring.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/string_tensor.h" -- GitLab