From 1e3a94be87c23c180533e5d6bcb348396f079c8e Mon Sep 17 00:00:00 2001
From: An Improved PeleeNet Algorithm with Feature Pyramid Networks for Image Detection <39549453+Baibaifan@users.noreply.github.com>
Date: Mon, 19 Apr 2021 19:09:37 +0800
Subject: [PATCH] add npu check nan and inf (#32340)

add npu check nan and inf (#32340)
---
 .../framework/details/nan_inf_utils_detail.cc | 30 ++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/framework/details/nan_inf_utils_detail.cc b/paddle/fluid/framework/details/nan_inf_utils_detail.cc
index 103dd0c5ae..0fdb97db20 100644
--- a/paddle/fluid/framework/details/nan_inf_utils_detail.cc
+++ b/paddle/fluid/framework/details/nan_inf_utils_detail.cc
@@ -354,8 +354,36 @@ void CheckVarHasNanOrInf(const std::string& op_type,
                               var_name));
 #endif
     return;
-  }
+  } else if (platform::is_npu_place(tensor->place())) {
+#ifdef PADDLE_WITH_ASCEND_CL
+    if (tensor->type() != proto::VarType::FP32) {
+      return;
+    }
+
+    framework::LoDTensor cpu_tensor;
+    cpu_tensor.Resize(tensor->dims());
+    float* cpu_data = static_cast<float*>(
+        cpu_tensor.mutable_data(platform::CPUPlace(), tensor->type()));
+    framework::TensorCopySync(*tensor, platform::CPUPlace(), &cpu_tensor);
+    bool flag = false;
+    for (int i = 0; i < cpu_tensor.numel(); i++) {
+      if (isnan(cpu_data[i]) || isinf(cpu_data[i])) {
+        flag = true;
+        break;
+      }
+    }
+    PADDLE_ENFORCE_NE(
+        flag, true,
+        platform::errors::Fatal("Operator %s output Tensor %s contains Inf.",
+                                op_type, var_name));
+#else
+    PADDLE_THROW(platform::errors::PreconditionNotMet(
+        "Tensor[%s] use npu place. PaddlePaddle must compile with NPU.",
+        var_name));
+#endif
+    return;
+  }

   tensor_check<float>(op_type, var_name, *tensor, place);
 }
--
GitLab
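
For readers without an Ascend NPU build, the host-side check added above boils down to the following minimal, Paddle-independent C++ sketch: copy the data to host memory (done by TensorCopySync in the patch), then scan element-wise for NaN or Inf. The helper name contains_nan_or_inf and the sample values are illustrative only, not part of the patch.

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <limits>
#include <vector>

// Mirrors the element-wise loop in the NPU branch of CheckVarHasNanOrInf:
// scan a host buffer and report whether any value is NaN or Inf.
bool contains_nan_or_inf(const float* data, size_t numel) {
  for (size_t i = 0; i < numel; ++i) {
    if (std::isnan(data[i]) || std::isinf(data[i])) {
      return true;
    }
  }
  return false;
}

int main() {
  std::vector<float> ok = {1.0f, -2.5f, 3.0f};
  std::vector<float> bad = {1.0f, std::numeric_limits<float>::infinity(),
                            std::numeric_limits<float>::quiet_NaN()};

  // Prints 0 for the clean buffer and 1 for the buffer containing Inf/NaN.
  std::printf("ok  -> %d\n", contains_nan_or_inf(ok.data(), ok.size()));
  std::printf("bad -> %d\n", contains_nan_or_inf(bad.data(), bad.size()));
  return 0;
}

The patch raises a fatal error via PADDLE_ENFORCE_NE instead of returning a flag, and it only checks FP32 tensors, skipping other dtypes.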