diff --git a/paddle/fluid/framework/details/nan_inf_utils_detail.cc b/paddle/fluid/framework/details/nan_inf_utils_detail.cc
index 103dd0c5ae599b8126ef63fb8ae456846a2f1966..0fdb97db20af992998d94e37263f415a84cd1ba1 100644
--- a/paddle/fluid/framework/details/nan_inf_utils_detail.cc
+++ b/paddle/fluid/framework/details/nan_inf_utils_detail.cc
@@ -354,8 +354,36 @@ void CheckVarHasNanOrInf(const std::string& op_type,
                              var_name));
 #endif
     return;
-  }
+  } else if (platform::is_npu_place(tensor->place())) {
+#ifdef PADDLE_WITH_ASCEND_CL
+    if (tensor->type() != proto::VarType::FP32) {
+      return;
+    }
+
+    framework::LoDTensor cpu_tensor;
+    cpu_tensor.Resize(tensor->dims());
+    float* cpu_data = static_cast<float*>(
+        cpu_tensor.mutable_data(platform::CPUPlace(), tensor->type()));
+    framework::TensorCopySync(*tensor, platform::CPUPlace(), &cpu_tensor);
+    bool flag = false;
+    for (int i = 0; i < cpu_tensor.numel(); i++) {
+      if (isnan(cpu_data[i]) || isinf(cpu_data[i])) {
+        flag = true;
+        break;
+      }
+    }
+    PADDLE_ENFORCE_NE(
+        flag, true,
+        platform::errors::Fatal("Operator %s output Tensor %s contains Inf.",
+                                op_type, var_name));
+#else
+    PADDLE_THROW(platform::errors::PreconditionNotMet(
+        "Tensor[%s] use npu place. PaddlePaddle must compile with NPU.",
+        var_name));
+#endif
+    return;
+  }
 
   tensor_check<platform::CPUDeviceContext>(op_type, var_name, *tensor, place);
 }