diff --git a/paddle/fluid/eager/accumulation/gradient_accumulation.cc b/paddle/fluid/eager/accumulation/gradient_accumulation.cc index 7345c3612381bc9fcc1008f33922fbc705ec81e6..9d475d96e56ce0d06768568f159a4c7630b5bea4 100644 --- a/paddle/fluid/eager/accumulation/gradient_accumulation.cc +++ b/paddle/fluid/eager/accumulation/gradient_accumulation.cc @@ -193,13 +193,14 @@ void TensorAdd(const egr::EagerTensor& src, egr::EagerTensor* dst) { // TODO(jiabin): Support NPU here PADDLE_TENSOR_ADD(float); - // NOTE(phlrain): xpu only support float +// NOTE(phlrain): xpu only support float +#ifndef PADDLE_WITH_XPU PADDLE_TENSOR_ADD(double); // NOTE(chenweihang): only support complex grad tensor accumulated, // support selected rows if needed in the future PADDLE_TENSOR_ADD(paddle::platform::complex<float>); PADDLE_TENSOR_ADD(paddle::platform::complex<double>); - +#endif #undef PADDLE_TENSOR_ADD if (data_type == paddle::framework::proto::VarType::FP16) { @@ -268,13 +269,14 @@ void VariableAdd(const egr::EagerTensor& src, egr::EagerTensor* dst) { // TODO(jiabin): Support NPU here PADDLE_TENSOR_ADD(float); - // NOTE(phlrain): xpu only support float +// NOTE(phlrain): xpu only support float +#ifndef PADDLE_WITH_XPU PADDLE_TENSOR_ADD(double); // NOTE(chenweihang): only support complex grad tensor accumulated, // support selected rows if needed in the future PADDLE_TENSOR_ADD(paddle::platform::complex<float>); PADDLE_TENSOR_ADD(paddle::platform::complex<double>); - +#endif #undef PADDLE_TENSOR_ADD if (data_type == paddle::framework::proto::VarType::FP16) {