未验证 提交 a020a257 编写于 作者: Z zhaoyuchen2018 提交者: GitHub

Fix model int8 quant fail, test=develop (#22891)

The model fails when int8 quantization is enabled, so disable allocating CPU memory
for small variables.
上级 dd67d44a
......@@ -187,21 +187,12 @@ void SetTensorFromPyArrayT(
}
} else {
#ifdef PADDLE_WITH_CUDA
T *dst;
if (array.nbytes() <= 4 && !paddle::platform::is_cuda_pinned_place(place)) {
dst = self->mutable_data<T>(platform::CPUPlace());
} else {
dst = self->mutable_data<T>(place);
}
auto dst = self->mutable_data<T>(place);
if (paddle::platform::is_cuda_pinned_place(place)) {
std::memcpy(dst, array.data(), array.nbytes());
} else if (paddle::platform::is_gpu_place(place)) {
if (array.nbytes() <= 4) {
std::memcpy(dst, array.data(), array.nbytes());
} else {
paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
cudaMemcpyHostToDevice);
}
paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
cudaMemcpyHostToDevice);
} else {
PADDLE_THROW(
"Incompatible place type: Tensor.set() supports CPUPlace, CUDAPlace "
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册