From a020a25797cd8f3c7e0e8b7ff28deb8631dff021 Mon Sep 17 00:00:00 2001 From: zhaoyuchen2018 <45989343+zhaoyuchen2018@users.noreply.github.com> Date: Mon, 9 Mar 2020 14:35:07 +0800 Subject: [PATCH] Fix model int8 quant fail, test=develop (#22891) The model fails when int8 quantization is enabled, so disable the special-case that allocated memory on the CPU for small variables. --- paddle/fluid/pybind/tensor_py.h | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index d1d7681b7b..9e5dc63851 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -187,21 +187,12 @@ void SetTensorFromPyArrayT( } } else { #ifdef PADDLE_WITH_CUDA - T *dst; - if (array.nbytes() <= 4 && !paddle::platform::is_cuda_pinned_place(place)) { - dst = self->mutable_data(platform::CPUPlace()); - } else { - dst = self->mutable_data(place); - } + auto dst = self->mutable_data(place); if (paddle::platform::is_cuda_pinned_place(place)) { std::memcpy(dst, array.data(), array.nbytes()); } else if (paddle::platform::is_gpu_place(place)) { - if (array.nbytes() <= 4) { - std::memcpy(dst, array.data(), array.nbytes()); - } else { - paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(), - cudaMemcpyHostToDevice); - } + paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(), + cudaMemcpyHostToDevice); } else { PADDLE_THROW( "Incompatible place type: Tensor.set() supports CPUPlace, CUDAPlace " -- GitLab