From 6a1957e7482fd7575d1b0ffa4f5018c467d4636c Mon Sep 17 00:00:00 2001 From: Baibaifan <39549453+Baibaifan@users.noreply.github.com> Date: Sat, 1 May 2021 14:13:19 +0800 Subject: [PATCH] solve develop bugs (#32560) (#32684) --- paddle/fluid/operators/collective/c_sync_comm_stream_op.cc | 2 -- paddle/fluid/pybind/ascend_wrapper_py.cc | 2 ++ python/paddle/distributed/fleet/launch.py | 4 ++-- python/paddle/distributed/fleet/launch_utils.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/operators/collective/c_sync_comm_stream_op.cc b/paddle/fluid/operators/collective/c_sync_comm_stream_op.cc index e6f6bf5345..772122bb58 100644 --- a/paddle/fluid/operators/collective/c_sync_comm_stream_op.cc +++ b/paddle/fluid/operators/collective/c_sync_comm_stream_op.cc @@ -63,7 +63,6 @@ class CSyncCommStreamCudaKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { auto place = ctx.GetPlace(); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) - int ring_id = ctx.Attr("ring_id"); auto stream = platform::NCCLCommContext::Instance().Get(ring_id, place)->stream(); @@ -75,7 +74,6 @@ class CSyncCommStreamCudaKernel : public framework::OpKernel { #endif #elif defined(PADDLE_WITH_ASCEND_CL) - auto place = ctx.GetPlace(); PADDLE_ENFORCE_EQ(is_npu_place(place), true, platform::errors::PreconditionNotMet( "Sync stream op can run on npu place only for now.")); diff --git a/paddle/fluid/pybind/ascend_wrapper_py.cc b/paddle/fluid/pybind/ascend_wrapper_py.cc index 9a1fa1d770..43725f7dc0 100644 --- a/paddle/fluid/pybind/ascend_wrapper_py.cc +++ b/paddle/fluid/pybind/ascend_wrapper_py.cc @@ -108,12 +108,14 @@ enum AttrType { AT_NAMEATTR }; +#ifdef PADDLE_WITH_ASCEND void BindAscendDevice(py::module *m) { py::class_(*m, "NPUDevice") .def_static( "get_device_count", static_cast(&platform::ascend::NPUDevice::GetDeviceCount)); } +#endif void BindAscendGraph(py::module *m) { m->def("ge_initialize", 
&ge_initialize, "GEInitialize"); diff --git a/python/paddle/distributed/fleet/launch.py b/python/paddle/distributed/fleet/launch.py index 89ca7e1961..69c5b325d1 100644 --- a/python/paddle/distributed/fleet/launch.py +++ b/python/paddle/distributed/fleet/launch.py @@ -325,8 +325,8 @@ def which_distributed_mode(args): if fluid.core.is_compiled_with_cuda(): accelerators = fluid.core.get_cuda_device_count() - elif fluid.core.is_compiled_with_ascend(): - accelerators = fluid.core.NPUDevice.get_device_count() + elif fluid.core.is_compiled_with_npu(): + accelerators = fluid.core.get_npu_device_count() elif fluid.core.is_compiled_with_xpu(): accelerators = fluid.core.get_xpu_device_count() else: diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py index b4d5c58abb..be7ad257cc 100644 --- a/python/paddle/distributed/fleet/launch_utils.py +++ b/python/paddle/distributed/fleet/launch_utils.py @@ -653,8 +653,8 @@ def get_xpus(xpus): def get_device_mode(): - if fluid.core.is_compiled_with_ascend() and \ - fluid.core.NPUDevice.get_device_count() > 0: + if fluid.core.is_compiled_with_npu() and \ + fluid.core.get_npu_device_count() > 0: print("launch train in ascend npu mode!") return DeviceMode.ASCEND_NPU -- GitLab