From 0b6623d77a280549303d310c60edae6577a95537 Mon Sep 17 00:00:00 2001 From: ronnywang <524019753@qq.com> Date: Fri, 10 Sep 2021 11:36:33 +0800 Subject: [PATCH] [NPU] support gradient_accumulator (#35044) --- paddle/fluid/imperative/CMakeLists.txt | 4 +++ .../fluid/imperative/gradient_accumulator.cc | 27 +++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/paddle/fluid/imperative/CMakeLists.txt b/paddle/fluid/imperative/CMakeLists.txt index 71e16fc1651..595aba88730 100644 --- a/paddle/fluid/imperative/CMakeLists.txt +++ b/paddle/fluid/imperative/CMakeLists.txt @@ -36,6 +36,10 @@ if(WITH_GLOO) endif() endif() +if(NOT WITH_ASCEND_CL) cc_library(gradient_accumulator SRCS gradient_accumulator.cc DEPS blas operator lod_tensor selected_rows selected_rows_functor var_type_traits layer math_function) +else() +cc_library(gradient_accumulator SRCS gradient_accumulator.cc DEPS blas operator lod_tensor selected_rows selected_rows_functor var_type_traits layer math_function npu_op_runner) +endif() add_subdirectory(tests) diff --git a/paddle/fluid/imperative/gradient_accumulator.cc b/paddle/fluid/imperative/gradient_accumulator.cc index 9f08d0b73fc..fbc5453f821 100644 --- a/paddle/fluid/imperative/gradient_accumulator.cc +++ b/paddle/fluid/imperative/gradient_accumulator.cc @@ -31,6 +31,9 @@ #ifdef PADDLE_WITH_XPU #include "xpu/refactor/math.h" #endif +#ifdef PADDLE_WITH_ASCEND_CL +#include "paddle/fluid/operators/npu_op_runner.h" +#endif namespace paddle { namespace imperative { @@ -199,6 +202,30 @@ void TensorAdd(const framework::Variable& src, framework::Variable* dst) { return; \ } +#ifdef PADDLE_WITH_ASCEND_CL + if (platform::is_npu_place(place)) { + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); + platform::DeviceContext* ctx = pool.Get(place); + auto dev_ctx = dynamic_cast<platform::NPUDeviceContext*>(ctx); + if (data_type == framework::DataTypeTrait<float>::DataType()) { + dst_tensor->mutable_data<float>(place); + } else if (data_type == 
framework::DataTypeTrait<double>::DataType()) { + dst_tensor->mutable_data<double>(place); + } else if (data_type == + framework::DataTypeTrait<platform::float16>::DataType()) { + dst_tensor->mutable_data<platform::float16>(place); + } else { + PADDLE_THROW(platform::errors::Unimplemented( + "Gradient accumulation of data type (%s) on place (%s) is not " + "supported in imperative mode", + framework::DataTypeToString(data_type), place)); + } + const auto& runner = operators::NpuOpRunner( + "Add", {*dst_tensor, src_tensor}, {*dst_tensor}, {}); + runner.Run(dev_ctx->stream()); + return; + } +#endif PADDLE_TENSOR_ADD(float); #ifndef PADDLE_WITH_XPU // NOTE(phlrain): xpu only support float -- GitLab