未验证 提交 72241a6a 编写于 作者: A Aganlengzi 提交者: GitHub

[NPU] reorganization for device API abstraction (#37110)

* [NPU] reorganization for device API abstraction

* [NPU] delete old files

* [NPU] fix npu_collective_helper

* [NPU] fix collective_helper

* [NPU] fix ut

* [NPU] mod memory allocation and hccl_helper

* [NPU] fix place_type

* [NPU] split enforce.h

* move acl* call into npu_info

* merge conflict

* fix merge

* merge conflict

* merge conflict
上级 8fbb9fa3
......@@ -34,7 +34,7 @@
#include "xpu/refactor/math.h"
#endif
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#endif
namespace egr {
......
......@@ -17,7 +17,7 @@
#include "paddle/fluid/framework/op_proto_maker.h"
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#endif
namespace paddle {
......
......@@ -32,7 +32,7 @@
#include "xpu/refactor/math.h"
#endif
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#endif
namespace paddle {
......
......@@ -22,7 +22,7 @@
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
namespace paddle {
namespace framework {
......
......@@ -19,8 +19,8 @@
#include <vector>
#include "paddle/fluid/imperative/parallel_context.h"
#include "paddle/fluid/platform/dynload/hccl.h"
#include "paddle/fluid/platform/npu_resource_pool.h"
#include "paddle/fluid/platform/device/npu/dynload/hccl.h"
#include "paddle/fluid/platform/device/npu/npu_resource_pool.h"
namespace paddle {
namespace framework {
......
......@@ -290,7 +290,7 @@ void Tensor::CopyToCpuImpl(T *data, void *exec_stream, CallbackFunc cb,
paddle::memory::Copy(paddle::platform::CPUPlace(),
static_cast<void *>(data), npu_place, t_data,
ele_num * sizeof(T), dev_ctx->stream());
aclrtSynchronizeStream(dev_ctx->stream());
paddle::platform::NPUStreamSync(dev_ctx->stream());
#else
PADDLE_THROW(paddle::platform::errors::Unavailable(
"Can not create tensor with NPU place because paddle is not compiled "
......
......@@ -23,7 +23,6 @@
#include "paddle/fluid/memory/allocation/naive_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/retry_allocator.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/place.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
......
......@@ -22,7 +22,6 @@
#include "paddle/fluid/memory/detail/system_allocator.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/string/printf.h"
......@@ -33,6 +32,9 @@
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_header.h"
#endif
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/platform/device/npu/npu_info.h"
#endif
PADDLE_DEFINE_EXPORTED_bool(
init_allocated_mem, false,
......@@ -327,8 +329,8 @@ void *Alloc<platform::NPUPlace>(const platform::NPUPlace &place, size_t size) {
size_t avail, total;
platform::NPUMemoryUsage(&avail, &total);
PADDLE_THROW(platform::errors::ResourceExhausted(
"Cannot allocate %s in GPU %d, avaliable %s, total %s, GpuMinChunkSize "
"%s, GpuMaxChunkSize %s, GPU memory used: %s.",
"Cannot allocate %s in NPU %d, avaliable %s, total %s, NpuMinChunkSize "
"%s, NpuMaxChunkSize %s, NPU memory used: %s.",
string::HumanReadableSize(size), place.device,
string::HumanReadableSize(avail), string::HumanReadableSize(total),
string::HumanReadableSize(buddy_allocator->GetMinChunkSize()),
......@@ -336,7 +338,7 @@ void *Alloc<platform::NPUPlace>(const platform::NPUPlace &place, size_t size) {
string::HumanReadableSize(Used<platform::NPUPlace>(place))));
} else {
if (FLAGS_init_allocated_mem) {
aclrtMemset(ptr, size, 0xEF, size);
platform::NPUMemsetSync(ptr, 0xEF, size, size);
}
}
VLOG(10) << "Allocate " << size << " bytes on " << platform::Place(place);
......@@ -387,8 +389,7 @@ void *Alloc<platform::NPUPinnedPlace>(const platform::NPUPinnedPlace &place,
void *ptr = buddy_allocator->Alloc(size);
if (ptr == nullptr) {
LOG(WARNING) << "aclrtMallocHost Cannot allocate " << size
<< " bytes in NPUPinnedPlace";
LOG(WARNING) << "Cannot allocate " << size << " bytes in NPUPinnedPlace";
}
if (FLAGS_init_allocated_mem) {
memset(ptr, 0xEF, size);
......
......@@ -14,8 +14,8 @@
#include "paddle/fluid/memory/allocation/npu_allocator.h"
#include <string>
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/npu_info.h"
namespace paddle {
namespace memory {
......
......@@ -23,7 +23,7 @@ void NPUPinnedAllocator::ProcessEventsAndFree() {
for (auto it = npu_events_.begin(); it != npu_events_.end();) {
aclrtEvent event = it->second;
aclrtEventStatus status = ACL_EVENT_STATUS_COMPLETE;
PADDLE_ENFORCE_NPU_SUCCESS(aclrtQueryEvent(event, &status));
platform::NPUEventQuery(event, &status);
if (status == ACL_EVENT_STATUS_COMPLETE) {
Allocation *allocation = it->first;
......@@ -31,7 +31,7 @@ void NPUPinnedAllocator::ProcessEventsAndFree() {
free(ptr);
npu_events_.erase(it++);
delete allocation;
PADDLE_ENFORCE_NPU_SUCCESS(aclrtDestroyEvent(event));
platform::NPUEventDestroy(event);
} else {
++it;
}
......@@ -67,12 +67,12 @@ void NPUPinnedAllocator::FreeImpl(Allocation *allocation) {
aclrtEvent event = iter->second;
aclrtEventStatus status = ACL_EVENT_STATUS_COMPLETE;
PADDLE_ENFORCE_NPU_SUCCESS(aclrtQueryEvent(event, &status));
platform::NPUEventQuery(event, &status);
if (status == ACL_EVENT_STATUS_COMPLETE) {
free(ptr);
npu_events_.erase(allocation);
delete allocation;
PADDLE_ENFORCE_NPU_SUCCESS(aclrtDestroyEvent(event));
platform::NPUEventDestroy(event);
}
return;
}
......@@ -87,8 +87,8 @@ void NPUPinnedAllocator::RecordEvent(Allocation *allocation,
aclrtStream stream) {
std::lock_guard<std::mutex> lock(mtx_);
aclrtEvent event = nullptr;
PADDLE_ENFORCE_NPU_SUCCESS(aclrtCreateEvent(&event));
PADDLE_ENFORCE_NPU_SUCCESS(aclrtRecordEvent(event, stream));
platform::NPUEventCreate(&event);
platform::NPUEventRecord(event, stream);
npu_events_.insert({allocation, event});
}
......
......@@ -21,7 +21,7 @@
#include "acl/acl.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/place.h"
namespace paddle {
......
......@@ -25,8 +25,8 @@ limitations under the License. */
#include "paddle/fluid/memory/detail/memory_block.h"
#include "paddle/fluid/memory/detail/system_allocator.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/npu_info.h"
namespace paddle {
namespace memory {
......
......@@ -24,8 +24,8 @@ limitations under the License. */
#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/npu_info.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
defined(PADDLE_WITH_ASCEND_CL)
......
......@@ -27,9 +27,9 @@ limitations under the License. */
#include "gflags/gflags.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/npu_info.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/cuda_device_guard.h"
......@@ -326,14 +326,14 @@ void* NPUPinnedAllocator::Alloc(size_t* index, size_t size) {
void* p;
// PINNED memory is visible to all NPU contexts.
auto result = aclrtMallocHost(&p, size);
auto result = platform::NPUHostMalloc(&p, size);
if (result == ACL_ERROR_NONE) {
*index = 1; // PINNED memory
npu_pinnd_alloc_size_ += size;
return p;
} else {
LOG(WARNING) << "aclrtMallocHost failed.";
LOG(WARNING) << "NPUHostMalloc failed.";
return nullptr;
}
......@@ -351,14 +351,13 @@ void NPUPinnedAllocator::Free(void* p, size_t size, size_t index) {
"allocated npu pinned memory (%d)",
size, npu_pinnd_alloc_size_));
npu_pinnd_alloc_size_ -= size;
err = aclrtFreeHost(p);
err = platform::NPUHostFree(p);
if (err != ACL_ERROR_NONE) {
PADDLE_ENFORCE_EQ(
err, 0,
platform::errors::Fatal(
"aclrtFreeHost failed in NPUPinnedAllocator, error code is %d",
err));
"NPUHostFree failed in NPUPinnedAllocator, error code is %d", err));
}
}
......
......@@ -164,7 +164,6 @@ endif()
if (WITH_ASCEND_CL)
cc_test(assign_op_npu_test SRCS assign_op_npu_test.cc DEPS assign_op)
cc_library(npu_op_runner SRCS npu_op_runner.cc DEPS operator npu_info)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} npu_op_runner)
endif()
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the Licnse. */
#include "paddle/fluid/operators/abs_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -19,7 +19,7 @@ limitations under the Licnse. */
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include <cmath>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/amp/check_finite_and_unscale_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include <cmath>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include <cmath>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
DECLARE_int32(min_loss_scaling);
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the Licnse. */
#include "paddle/fluid/operators/arg_min_max_op_base.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/arg_min_max_op_base.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/argsort_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/operators/assign_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/batch_norm_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/bce_loss_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/operators/cast_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/clip_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -21,7 +21,7 @@
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_memory_aligment.h"
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#endif
namespace paddle {
......
......@@ -18,7 +18,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace paddle {
......
......@@ -38,7 +38,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace f = paddle::framework;
......
......@@ -38,7 +38,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace f = paddle::framework;
......
......@@ -21,7 +21,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) || \
defined(PADDLE_WITH_ASCEND_CL) || defined(PADDLE_WITH_XPU_BKCL)
......@@ -42,7 +42,7 @@ limitations under the License. */
#endif
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
#if defined(PADDLE_WITH_ASCEND_CL)
......
......@@ -35,7 +35,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
// Node1: HCCL_WHITELIST_DISABLE=1 FLAGS_selected_npus=1 GLOG_v=4 RANK_ID=1
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace paddle {
......
......@@ -35,7 +35,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace f = paddle::framework;
......
......@@ -22,11 +22,10 @@ class Scope;
} // namespace framework
} // namespace paddle
#if defined(PADDLE_WITH_ASCEND_CL)
#include "acl/acl.h"
#include "hccl/hccl.h"
#include "hccl/hccl_types.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace paddle {
......@@ -69,12 +68,11 @@ class CCommInitOpAscend : public framework::OperatorBase {
for (int32_t idx = 0; idx < size; idx++) {
input[idx] = 1.0;
}
PADDLE_ENFORCE_NPU_SUCCESS(aclrtMalloc(reinterpret_cast<void**>(&buff),
size * sizeof(float),
ACL_MEM_MALLOC_HUGE_FIRST));
PADDLE_ENFORCE_NPU_SUCCESS(aclrtMemcpy(
reinterpret_cast<void*>(buff), size * sizeof(float), input.data(),
size * sizeof(float), ACL_MEMCPY_HOST_TO_DEVICE));
PADDLE_ENFORCE_NPU_SUCCESS(platform::RecordedNPUMalloc(
reinterpret_cast<void**>(&buff), size * sizeof(float), device_id));
platform::NPUMemcpySync(reinterpret_cast<void*>(buff), input.data(),
size * sizeof(float), ACL_MEMCPY_HOST_TO_DEVICE,
size * sizeof(float));
VLOG(3) << "Build buff data successful.";
aclrtStream stream = nullptr;
......@@ -88,7 +86,7 @@ class CCommInitOpAscend : public framework::OperatorBase {
PADDLE_ENFORCE_NPU_SUCCESS(platform::dynload::HcclBroadcast(
buff, size, HCCL_DATA_TYPE_FP32, 0, comm->comm(), stream));
// Synchronize stream to find hccl error in time.
PADDLE_ENFORCE_NPU_SUCCESS(aclrtSynchronizeStream(stream));
platform::NPUStreamSync(stream);
VLOG(3) << "Build connection successful.";
#else
PADDLE_THROW(platform::errors::PreconditionNotMet(
......
......@@ -17,8 +17,8 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/collective/c_embedding_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......@@ -136,11 +136,10 @@ void NPUGetIdsEmbedding(const framework::ExecutionContext &context) {
uint8_t *pad_data = reinterpret_cast<uint8_t *>(
table_t_pad.mutable_data<T>(pad_shape, context.GetPlace()));
PADDLE_ENFORCE_NPU_SUCCESS(
aclrtMemcpyAsync(pad_data, mem_size, table_t->data<T>(), mem_size,
ACL_MEMCPY_DEVICE_TO_DEVICE, stream));
PADDLE_ENFORCE_NPU_SUCCESS(aclrtMemsetAsync(
pad_data + mem_size, line_mem_size, 0, line_mem_size, stream));
platform::NPUMemcpyAsync(pad_data, table_t->data<T>(), mem_size,
ACL_MEMCPY_DEVICE_TO_DEVICE, stream, mem_size);
platform::NPUMemsetAsync(pad_data + mem_size, 0, line_mem_size, stream,
line_mem_size);
output_t->mutable_data<T>(context.GetPlace());
NpuOpRunner runner;
......@@ -202,8 +201,8 @@ void NPUUpdateEmbedding(const framework::ExecutionContext &context) {
table_t_pad.mutable_data<T>(pad_shape, context.GetPlace()));
size_t table_t_pad_mem_size =
table_t_pad.numel() * framework::SizeOfType(table_t_pad.type());
PADDLE_ENFORCE_NPU_SUCCESS(aclrtMemsetAsync(pad_data, table_t_pad_mem_size, 0,
table_t_pad_mem_size, stream));
platform::NPUMemsetAsync(pad_data, 0, table_t_pad_mem_size, stream,
table_t_pad_mem_size);
// NOTE(zhiqiu): It seems in cann 20.1, the first input and output
// can be different tensor, but in cann 20.2+, it does inplace operation.
......@@ -225,8 +224,8 @@ void NPUUpdateEmbedding(const framework::ExecutionContext &context) {
platform::errors::InvalidArgument(
"NPU only accept the second dim must align by 64"));
PADDLE_ENFORCE_NPU_SUCCESS(aclrtMemcpyAsync(
dst, mem_size, pad_data, mem_size, ACL_MEMCPY_DEVICE_TO_DEVICE, stream));
platform::NPUMemcpyAsync(dst, pad_data, mem_size, ACL_MEMCPY_DEVICE_TO_DEVICE,
stream, mem_size);
}
template <typename T>
......
......@@ -23,7 +23,7 @@ limitations under the License. */
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/dynload/hccl.h"
#include "paddle/fluid/platform/device/npu/dynload/hccl.h"
#include "paddle/fluid/platform/gen_comm_id_helper.h"
namespace paddle {
......
......@@ -43,7 +43,7 @@ limitations under the License. */
#endif
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace paddle {
......
......@@ -35,7 +35,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace f = paddle::framework;
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace paddle {
......
......@@ -38,7 +38,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace f = paddle::framework;
......
......@@ -69,7 +69,7 @@ class CSyncCalcStreamKernel : public framework::OpKernel<T> {
auto dev_ctx = static_cast<platform::NPUDeviceContext*>(
platform::DeviceContextPool::Instance().Get(place));
PADDLE_ENFORCE_NPU_SUCCESS(aclrtSynchronizeStream(dev_ctx->stream()));
platform::NPUStreamSync(dev_ctx->stream());
#else
PADDLE_THROW(platform::errors::PreconditionNotMet(
......
......@@ -21,7 +21,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace paddle {
......@@ -80,7 +80,7 @@ class CSyncCommStreamKernel : public framework::OpKernel<T> {
int ring_id = ctx.Attr<int>("ring_id");
auto stream =
platform::HCCLCommContext::Instance().Get(ring_id, place)->stream();
PADDLE_ENFORCE_NPU_SUCCESS(aclrtSynchronizeStream(stream));
platform::NPUStreamSync(stream);
#else
PADDLE_THROW(platform::errors::PreconditionNotMet(
......
......@@ -35,7 +35,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace f = paddle::framework;
......
......@@ -36,7 +36,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace f = paddle::framework;
......
......@@ -21,9 +21,9 @@ limitations under the License. */
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/split.h"
......
......@@ -31,7 +31,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
DECLARE_int32(get_host_by_name_time);
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <memory>
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
namespace paddle {
namespace operators {
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include "paddle/fluid/operators/collective/partial_recv_op.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
namespace paddle {
namespace operators {
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include "paddle/fluid/operators/collective/send_v2_op.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
namespace paddle {
namespace operators {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace paddle {
......
......@@ -35,7 +35,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace f = paddle::framework;
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace paddle {
......
......@@ -34,7 +34,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif
namespace f = paddle::framework;
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/concat_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/controlflow/compare_op.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -10,7 +10,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/controlflow/logical_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@
// limitations under the License.
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/conv_transpose_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/crop_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/cum_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -10,7 +10,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/box_coder_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -10,7 +10,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/density_prior_box_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/iou_similarity_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/prior_box_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -18,7 +18,7 @@ limitations under the License. */
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/dropout_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -18,7 +18,7 @@ limitations under the License. */
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/elementwise/elementwise_add_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_npu.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/operators/elementwise/elementwise_div_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/operators/elementwise/elementwise_div_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/elementwise/elementwise_max_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_npu.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -18,7 +18,7 @@ limitations under the License. */
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/elementwise/elementwise_min_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_npu.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/elementwise/elementwise_mod_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_npu.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/elementwise/elementwise_mul_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_npu.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/elementwise/elementwise_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/fluid/operators/elementwise/elementwise_npu.h"
#include "paddle/fluid/operators/elementwise/elementwise_pow_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/operators/elementwise/elementwise_sub_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -12,7 +12,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/expand_as_v2_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/expand_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/expand_v2_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/eye_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/fill_any_like_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/fill_constant_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/fill_constant_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/fill_zeros_like_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/flatten_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/gather_nd_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -18,8 +18,8 @@ limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/kron_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/operators/gelu_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/group_norm_op.h"
#include <vector>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/huber_loss_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@
// limitations under the License.
#include "paddle/fluid/operators/increment_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/index_sample_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/index_select_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include "paddle/fluid/operators/interpolate_op.h"
#include <string>
#include <vector>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/interpolate_v2_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the Licnse. */
#include "paddle/fluid/operators/kldiv_loss_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@
// limitations under the License.
#include "paddle/fluid/operators/label_smooth_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/layer_norm_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/log_loss_op.h"
#include <cmath>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@
// limitations under the License.
#include "paddle/fluid/operators/log_softmax_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/masked_select_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/beam_search.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace framework {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/math/concat_and_split.h"
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#endif
namespace paddle {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/operators/matmul_v2_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -10,7 +10,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/mean_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/meshgrid_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/controlflow/compare_op.h"
#include "paddle/fluid/operators/metrics/accuracy_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/operators/mul_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -10,7 +10,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/norm_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/one_hot_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/one_hot_v2_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,8 +16,8 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/optimizers/adam_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/optimizers/momentum_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/optimizers/sgd_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -10,7 +10,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/optimizers/rmsprop_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,8 +15,8 @@ limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/optimizers/sgd_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/p_norm_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/pool_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/range_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -240,10 +240,8 @@ void BufferedReader::ReadAsync(size_t i) {
platform::SetNPUDeviceId(
BOOST_GET_CONST(platform::NPUPlace, place_).device);
PADDLE_ENFORCE_NPU_SUCCESS(
aclrtRecordEvent(events_[i].get(), compute_stream_));
PADDLE_ENFORCE_NPU_SUCCESS(
aclrtStreamWaitEvent(stream_.get(), events_[i].get()));
platform::NPUEventRecord(events_[i].get(), compute_stream_);
platform::NPUStreamWaitEvent(stream_.get(), events_[i].get());
platform::RecordEvent record_event("BufferedReader:MemoryCopy");
for (size_t i = 0; i < cpu.size(); ++i) {
......@@ -260,11 +258,11 @@ void BufferedReader::ReadAsync(size_t i) {
memory::Copy(BOOST_GET_CONST(platform::NPUPlace, place_), npu_ptr,
BOOST_GET_CONST(platform::CPUPlace, cpu_place), cpu_ptr,
size, stream_.get());
PADDLE_ENFORCE_NPU_SUCCESS(aclrtSynchronizeStream(stream_.get()));
platform::NPUStreamSync(stream_.get());
}
npu[i].set_lod(cpu[i].lod());
}
PADDLE_ENFORCE_NPU_SUCCESS(aclrtSynchronizeStream(stream_.get()));
platform::NPUStreamSync(stream_.get());
}
#endif
return i;
......
......@@ -26,8 +26,8 @@
#include "paddle/fluid/platform/gpu_info.h"
#endif
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/npu_resource_pool.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_resource_pool.h"
#endif
namespace paddle {
namespace operators {
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/reduce_ops/reduce_min_max_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/reduce_ops/reduce_mean_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_npu.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/reduce_ops/reduce_prod_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,9 +15,9 @@ limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
#include "paddle/fluid/operators/unsqueeze_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -11,7 +11,7 @@ limitations under the License. */
#include "paddle/fluid/operators/roi_align_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/scale_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -17,8 +17,8 @@ limitations under the License. */
#include <string>
#include "paddle/fluid/operators/kron_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/scatter_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/seed_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/sequence_ops/sequence_mask_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/set_value_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,8 +15,8 @@ limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/shape_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@
// limitations under the License.
#include "paddle/fluid/operators/shard_index_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@
// limitations under the License.
#include "paddle/fluid/operators/mul_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/slice_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/smooth_l1_loss_op.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -12,8 +12,8 @@ limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/softmax_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,8 +16,8 @@ limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/operators/math/cross_entropy.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/softmax_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,8 +15,8 @@ limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/split_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/squared_l2_norm_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/stack_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/strided_slice_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/slice_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,8 +16,8 @@ limitations under the License. */
#include <string>
#include <vector>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/sum_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,9 +13,9 @@ See the License for the specific language governing permissions and
limitations under the Licnse. */
#include "paddle/fluid/operators/batch_norm_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -12,7 +12,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/tile_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -12,8 +12,8 @@ limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/top_k_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include "paddle/fluid/operators/top_k_v2_op.h"
#include <string>
#include <vector>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/expand_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/tril_triu_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include "paddle/fluid/operators/truncated_gaussian_random_op.h"
#include <memory>
#include <string>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -16,8 +16,8 @@ limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/unsqueeze_op.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace ops = paddle::operators;
namespace plat = paddle::platform;
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/unstack_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/where_index_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -13,7 +13,7 @@
// limitations under the License.
#include "paddle/fluid/operators/where_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
namespace operators {
......
......@@ -4,12 +4,6 @@ if(WITH_GPU)
proto_library(external_error_proto SRCS external_error.proto)
endif(WITH_GPU)
if(WITH_ASCEND)
set(ASCEND_DEPS xpulib)
ELSE()
set(ASCEND_DEPS)
endif(WITH_ASCEND)
if (WITH_PYTHON)
py_proto_compile(profiler_py_proto SRCS profiler.proto)
add_custom_target(profiler_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
......@@ -69,15 +63,6 @@ cc_library(place SRCS place.cc DEPS enforce boost)
cc_test(place_test SRCS place_test.cc DEPS place glog gflags)
add_subdirectory(device)
if(WITH_ASCEND)
cc_library(ascend_npu_info SRCS ascend_npu_info.cc DEPS gflags glog enforce atlas_acl)
endif()
if(WITH_ASCEND_CL)
cc_library(npu_info SRCS npu_info.cc DEPS gflags glog enforce monitor ascendcl acl_op_compiler)
endif()
add_subdirectory(dynload)
add_subdirectory(stream)
......@@ -134,7 +119,10 @@ cc_library(device_context SRCS device_context.cc init.cc DEPS simple_threadpool
place eigen3 stringpiece cpu_helper cpu_info framework_proto ${GPU_CTX_DEPS} ${NPU_CTX_DEPS} ${MKLDNN_CTX_DEPS}
${dgc_deps} dlpack cudnn_workspace_helper ${XPU_CTX_DEPS})
cc_library(collective_helper SRCS collective_helper.cc collective_helper_npu.cc gen_comm_id_helper.cc DEPS framework_proto device_context enforce)
cc_library(collective_helper SRCS collective_helper.cc gen_comm_id_helper.cc DEPS framework_proto device_context enforce)
if(WITH_ASCEND_CL)
target_link_libraries(collective_helper npu_collective_helper)
endif()
if(WITH_GPU OR WITH_ROCM)
cc_library(cuda_resource_pool SRCS cuda_resource_pool.cc DEPS gpu_info)
......@@ -142,7 +130,6 @@ if(WITH_GPU OR WITH_ROCM)
endif()
if(WITH_ASCEND_CL)
cc_library(npu_resource_pool SRCS npu_resource_pool.cc DEPS npu_info)
target_link_libraries(device_context npu_resource_pool)
endif()
......
......@@ -21,8 +21,8 @@
#include "boost/variant.hpp"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/device/npu/dynload/hccl.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/dynload/hccl.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
......
......@@ -2,3 +2,8 @@
IF(WITH_XPU)
add_subdirectory(xpu)
ENDIF()
# NPU
IF(WITH_ASCEND OR WITH_ASCEND_CL)
add_subdirectory(npu)
ENDIF()
# NPU
add_subdirectory(dynload)
if(WITH_ASCEND)
cc_library(ascend_npu_info SRCS ascend_npu_info.cc DEPS gflags glog enforce atlas_acl)
endif()
if(WITH_ASCEND_CL)
cc_library(npu_info SRCS npu_info.cc DEPS gflags glog enforce monitor ascendcl acl_op_compiler)
cc_library(npu_resource_pool SRCS npu_resource_pool.cc DEPS npu_info)
cc_library(npu_stream SRCS npu_stream.cc DEPS enforce boost stream_callback_manager)
cc_library(npu_collective_helper SRCS npu_collective_helper.cc DEPS npu_stream npu_info)
cc_library(npu_op_runner SRCS npu_op_runner.cc DEPS operator npu_info)
endif()
......@@ -11,7 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/ascend_npu_info.h"
#include "paddle/fluid/platform/device/npu/ascend_npu_info.h"
#include <glog/logging.h>
#include "acl/acl_rt.h"
......
if(WITH_ASCEND_CL)
cc_library(npu_hccl SRCS hccl.cc DEPS dynamic_loader warpctc)
endif()
......@@ -14,7 +14,7 @@ limitations under the License. */
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/platform/dynload/hccl.h"
#include "paddle/fluid/platform/device/npu/dynload/hccl.h"
namespace paddle {
namespace platform {
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#ifdef PADDLE_WITH_ASCEND_CL
#include <sstream>  // std::ostringstream used by build_npu_error_msg below
#include <string>
#include "paddle/fluid/platform/enforce.h"
#include "acl/acl.h"
#include "hccl/hccl_types.h"
namespace paddle {
namespace platform {
namespace details {
// Maps an NPU status type (aclError / HcclResult) to its success value so
// PADDLE_ENFORCE_NPU_SUCCESS can compare uniformly across both APIs.
template <typename T>
struct NPUStatusType {};
#define DEFINE_NPU_STATUS_TYPE(type, success_value) \
  template <>                                       \
  struct NPUStatusType<type> {                      \
    using Type = type;                              \
    static constexpr Type kSuccess = success_value; \
  }
DEFINE_NPU_STATUS_TYPE(aclError, ACL_ERROR_NONE);
DEFINE_NPU_STATUS_TYPE(HcclResult, HCCL_SUCCESS);
}  // namespace details
// Builds a human-readable message for a failed ACL runtime call.
inline std::string build_npu_error_msg(aclError stat) {
  std::ostringstream sout;
  sout << " ACL error, the error code is : " << stat << ". ";
  return sout.str();
}
// Builds a human-readable message for a failed HCCL collective call.
inline std::string build_npu_error_msg(HcclResult stat) {
  std::ostringstream sout;
  sout << " HCCL error, the error code is : " << stat << ". ";
  return sout.str();
}
// Evaluates COND once and throws a paddle External error when the returned
// status is not the API's success value. Works for both aclError and
// HcclResult via details::NPUStatusType.
#define PADDLE_ENFORCE_NPU_SUCCESS(COND)                   \
  do {                                                     \
    auto __cond__ = (COND);                                \
    using __NPU_STATUS_TYPE__ = decltype(__cond__);        \
    constexpr auto __success_type__ =                      \
        ::paddle::platform::details::NPUStatusType<        \
            __NPU_STATUS_TYPE__>::kSuccess;                \
    if (UNLIKELY(__cond__ != __success_type__)) {          \
      auto __summary__ = ::paddle::platform::errors::External( \
          ::paddle::platform::build_npu_error_msg(__cond__));  \
      __THROW_ERROR_INTERNAL__(__summary__);               \
    }                                                      \
  } while (0)
}  // namespace platform
}  // namespace paddle
#endif  // PADDLE_WITH_ASCEND_CL
......@@ -14,8 +14,7 @@
#pragma once
#if defined(PADDLE_WITH_HCCL) || defined(PADDLE_WITH_RCCL) || \
defined(PADDLE_WITH_ASCEND_CL)
#ifdef PADDLE_WITH_ASCEND_CL
#include <stdio.h>
#include <memory>
......@@ -25,13 +24,11 @@
#include <unordered_map>
#include <vector>
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/platform/dynload/hccl.h"
#endif
#include "paddle/fluid/platform/device/npu/dynload/hccl.h"
#include "paddle/fluid/platform/device/npu/enforce_npu.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
#define HCCL_ID_VARNAME "HCCLID"
......@@ -137,7 +134,7 @@ struct HCCLContextMap {
}
VLOG(1) << "init hccl rank:" << rank << ", nranks:" << nranks
<< ", gpu_id:" << gpu_id << ", dev_id:" << order_[i];
aclrtSetDevice(gpu_id);
SetNPUDeviceId(gpu_id);
PADDLE_ENFORCE_NPU_SUCCESS(platform::dynload::HcclCommInitRootInfo(
nranks, hccl_id, rank, comms.get() + i));
}
......
......@@ -13,8 +13,9 @@
// limitations under the License.
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include <utility>
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/npu/enforce_npu.h"
namespace paddle {
namespace platform {
......@@ -79,7 +80,7 @@ HCCLComm* HCCLCommContext::CreateHCCLComm(HcclRootInfo* hccl_id, int nranks,
"Expected dev_id >= 0. But received dev_id is %d.", dev_id));
HcclComm comm;
PADDLE_ENFORCE_NPU_SUCCESS(aclrtSetDevice(dev_id));
SetNPUDeviceId(dev_id);
VLOG(1) << "initialized comm: " << &comm << ", nranks: " << nranks
<< ", hccl_id: " << hccl_id << ", rank: " << rank;
PADDLE_ENFORCE_NPU_SUCCESS(
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include <algorithm>
#include <cstdlib>
#include <memory>
......@@ -74,6 +74,10 @@ int GetCurrentNPUDeviceId() {
return device_id;
}
//! Fetch the ACL context bound to the calling thread (enforces success).
void GetCurrentNPUContext(aclrtContext *context) {
  PADDLE_ENFORCE_NPU_SUCCESS(aclrtGetCurrentContext(context));
}
//! Get a list of device ids from environment variable or use all.
std::vector<int> GetSelectedNPUDevices() {
// use user specified NPUs in single-node multi-process mode.
......@@ -215,6 +219,11 @@ void NPUMemcpyPeerSync(void *dst, int dst_device, const void *src, size_t count,
PADDLE_ENFORCE_NPU_SUCCESS(aclrtMemcpy(dst, dst_max_count, src, count, kind));
}
//! Synchronously set `count` bytes at `dst` to `value`. `max_count` is the
//! destination capacity reported to ACL; when 0 it defaults to `count`.
void NPUMemsetSync(void *dst, int value, size_t count, size_t max_count) {
  max_count = max_count ? max_count : count;
  PADDLE_ENFORCE_NPU_SUCCESS(aclrtMemset(dst, max_count, value, count));
}
void NPUMemsetAsync(void *dst, int value, size_t count, aclrtStream stream,
size_t max_count) {
max_count = max_count ? max_count : count;
......@@ -222,10 +231,38 @@ void NPUMemsetAsync(void *dst, int value, size_t count, aclrtStream stream,
aclrtMemsetAsync(dst, max_count, value, count, stream));
}
//! Create an NPU stream (enforces success).
void NPUStreamCreate(aclrtStream *stream) {
  PADDLE_ENFORCE_NPU_SUCCESS(aclrtCreateStream(stream));
}

//! Block until all work queued on `stream` has completed.
void NPUStreamSync(aclrtStream stream) {
  PADDLE_ENFORCE_NPU_SUCCESS(aclrtSynchronizeStream(stream));
}

//! Destroy an NPU stream (enforces success).
void NPUStreamDestroy(aclrtStream stream) {
  PADDLE_ENFORCE_NPU_SUCCESS(aclrtDestroyStream(stream));
}

//! Create an NPU event (enforces success).
void NPUEventCreate(aclrtEvent *event) {
  PADDLE_ENFORCE_NPU_SUCCESS(aclrtCreateEvent(event));
}

//! Destroy an NPU event (enforces success).
void NPUEventDestroy(aclrtEvent event) {
  PADDLE_ENFORCE_NPU_SUCCESS(aclrtDestroyEvent(event));
}

//! Record `event` on `stream`.
void NPUEventRecord(aclrtEvent event, aclrtStream stream) {
  PADDLE_ENFORCE_NPU_SUCCESS(aclrtRecordEvent(event, stream));
}

//! Query the completion status of `event` into `status`.
void NPUEventQuery(aclrtEvent event, aclrtEventStatus *status) {
  PADDLE_ENFORCE_NPU_SUCCESS(aclrtQueryEvent(event, status));
}

//! Make `stream` wait until `event` has been recorded/completed.
void NPUStreamWaitEvent(aclrtStream stream, aclrtEvent event) {
  PADDLE_ENFORCE_NPU_SUCCESS(aclrtStreamWaitEvent(stream, event));
}
static void RaiseNonOutOfMemoryError(aclError *status) {
if (*status == ACL_ERROR_BAD_ALLOC) {
*status = ACL_ERROR_NONE;
......@@ -378,6 +415,18 @@ bool IsNPUMallocRecorded(int dev_id) {
return RecordedNPUMallocHelper::Instance(dev_id)->NeedRecord();
}
//! Allocate pinned host memory via ACL; returns the raw aclError so callers
//! can decide how to handle allocation failure (no enforce here).
aclError NPUHostMalloc(void **ptr, size_t size) {
  return aclrtMallocHost(ptr, size);
}

//! Free memory previously allocated with NPUHostMalloc; returns raw aclError.
aclError NPUHostFree(void *ptr) { return aclrtFreeHost(ptr); }

//! Enqueue host callback `fn(userData)` on `stream` (enforces success).
void NPULaunchCallback(aclrtCallback fn, void *userData,
                       aclrtCallbackBlockType blockType, aclrtStream stream) {
  PADDLE_ENFORCE_NPU_SUCCESS(
      aclrtLaunchCallback(fn, userData, blockType, stream));
}
AclInstance::~AclInstance() {}
AclInstance &AclInstance::Instance() {
......
......@@ -21,7 +21,7 @@ limitations under the License. */
#include <vector>
#include "acl/acl.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/device/npu/enforce_npu.h"
namespace paddle {
namespace platform {
......@@ -31,12 +31,16 @@ int GetNPUDeviceCount();
//! Get the runtime version of the ith NPU
std::string GetNPURuntimeVersion(int id);
//! Check if this device can access peer or not.
int NPUCanAccessPeer(int src, int dst);
//! Get the current NPU device id in system.
int GetCurrentNPUDeviceId();
//! Get the current NPU context.
void GetCurrentNPUContext(aclrtContext *context);
//! Get the current NPU stream.
int GetCurrentStream();
......@@ -80,6 +84,9 @@ void NPUMemcpyAsync(void *dst, const void *src, size_t count,
void NPUMemcpySync(void *dst, const void *src, size_t count,
enum aclrtMemcpyKind kind, size_t dst_max_count = 0);
//! Set memory dst with value count size synchronously.
void NPUMemsetSync(void *dst, int value, size_t count, size_t max_count = 0);
//! Set memory dst with value count size asynchronously
void NPUMemsetAsync(void *dst, int value, size_t count, aclrtStream stream,
size_t max_count = 0);
......@@ -93,9 +100,36 @@ void NPUMemcpyPeerAsync(void *dst, int dst_device, const void *src,
void NPUMemcpyPeerSync(void *dst, int dst_device, const void *src,
int src_device, size_t count, size_t max_count = 0);
//! Create NPU stream.
void NPUStreamCreate(aclrtStream *stream);
//! Blocks until stream has completed all operations.
void NPUStreamSync(aclrtStream stream);
//! Destroy NPU stream.
void NPUStreamDestroy(aclrtStream stream);
//! Create NPU Event.
void NPUEventCreate(aclrtEvent *event);
//! Destroy NPU Event.
void NPUEventDestroy(aclrtEvent event);
//! Query NPU event status.
void NPUEventQuery(aclrtEvent event, aclrtEventStatus *status);
//! Record NPU event in the stream.
void NPUEventRecord(aclrtEvent event, aclrtStream stream);
//! Makes a stream wait on an event.
void NPUStreamWaitEvent(aclrtStream stream, aclrtEvent event);
//! Alloc host or device memory.
aclError NPUHostMalloc(void **ptr, size_t size);
//! Frees host or device memory.
aclError NPUHostFree(void *ptr);
//! aclrtMalloc with recorded info
aclError RecordedNPUMalloc(void **ptr, size_t size, int dev_id);
......@@ -111,6 +145,10 @@ uint64_t RecordedNPUMallocSize(int dev_id);
bool IsNPUMallocRecorded(int dev_id);
//! Adds a callback function executed on the host or device to the stream.
void NPULaunchCallback(aclrtCallback fn, void *userData,
aclrtCallbackBlockType blockType, aclrtStream stream);
class NPUDeviceGuard {
public:
explicit inline NPUDeviceGuard(int dev_id) {
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#include <paddle/fluid/framework/data_type.h>
#include <paddle/fluid/framework/operator.h>
......
......@@ -22,7 +22,7 @@ limitations under the License. */
#include "acl/acl.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/platform/device/npu/enforce_npu.h"
namespace paddle {
namespace operators {
......
......@@ -18,7 +18,7 @@ limitations under the License. */
#include <vector>
#include "acl/acl_prof.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/device/npu/enforce_npu.h"
namespace paddle {
namespace platform {
......
......@@ -13,8 +13,8 @@
// limitations under the License.
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/platform/npu_resource_pool.h"
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_resource_pool.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
namespace paddle {
namespace platform {
......@@ -26,13 +26,13 @@ NpuStreamResourcePool::NpuStreamResourcePool() {
auto creator = [dev_idx] {
platform::SetNPUDeviceId(dev_idx);
aclrtStream stream;
PADDLE_ENFORCE_NPU_SUCCESS(aclrtCreateStream(&stream));
NPUStreamCreate(&stream);
return stream;
};
auto deleter = [dev_idx](aclrtStream stream) {
platform::SetNPUDeviceId(dev_idx);
PADDLE_ENFORCE_NPU_SUCCESS(aclrtDestroyStream(stream));
NPUStreamDestroy(stream);
};
pool_.emplace_back(ResourcePool<NpuStreamObject>::Create(creator, deleter));
......@@ -64,13 +64,13 @@ NpuEventResourcePool::NpuEventResourcePool() {
auto creator = [dev_idx] {
platform::SetNPUDeviceId(dev_idx);
aclrtEvent event;
PADDLE_ENFORCE_NPU_SUCCESS(aclrtCreateEvent(&event));
NPUEventCreate(&event);
return event;
};
auto deleter = [dev_idx](aclrtEvent event) {
platform::SetNPUDeviceId(dev_idx);
PADDLE_ENFORCE_NPU_SUCCESS(aclrtDestroyEvent(event));
NPUEventDestroy(event);
};
pool_.emplace_back(ResourcePool<NpuEventObject>::Create(creator, deleter));
......
......@@ -12,9 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/stream/npu_stream.h"
#include "paddle/fluid/platform/device/npu/npu_stream.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/npu_info.h"
namespace paddle {
namespace platform {
......@@ -26,7 +25,7 @@ bool NPUStream::Init(const Place& place) {
"NPU stream must be created using npu place."));
place_ = place;
NPUDeviceGuard guard(BOOST_GET_CONST(NPUPlace, place_).device);
PADDLE_ENFORCE_NPU_SUCCESS(aclrtCreateStream(&stream_));
NPUStreamCreate(&stream_);
callback_manager_.reset(new StreamCallbackManager<aclrtStream>(stream_));
VLOG(3) << "NPUStream Init stream: " << stream_;
return true;
......@@ -37,14 +36,12 @@ void NPUStream::Destroy() {
Wait();
WaitCallback();
if (stream_) {
PADDLE_ENFORCE_NPU_SUCCESS(aclrtDestroyStream(stream_));
NPUStreamDestroy(stream_);
}
stream_ = nullptr;
}
void NPUStream::Wait() const {
PADDLE_ENFORCE_NPU_SUCCESS(aclrtSynchronizeStream(stream_));
}
void NPUStream::Wait() const { NPUStreamSync(stream_); }
} // namespace stream
} // namespace platform
......
......@@ -17,8 +17,8 @@ limitations under the License. */
#include <cstdint>
#include <memory>
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/stream_callback_manager.h"
......@@ -44,16 +44,12 @@ class NPUStream final {
template <typename Callback>
void RecordEvent(aclrtEvent ev, Callback callback) const {
callback();
PADDLE_ENFORCE_NPU_SUCCESS(aclrtRecordEvent(ev, stream_));
NPUEventRecord(ev, stream_);
}
void RecordEvent(aclrtEvent ev) const {
PADDLE_ENFORCE_NPU_SUCCESS(aclrtRecordEvent(ev, stream_));
}
void RecordEvent(aclrtEvent ev) const { NPUEventRecord(ev, stream_); }
void WaitEvent(aclrtEvent ev) const {
PADDLE_ENFORCE_NPU_SUCCESS(aclrtStreamWaitEvent(stream_, ev));
}
void WaitEvent(aclrtEvent ev) const { NPUStreamWaitEvent(stream_, ev); }
void Wait() const;
void WaitCallback() const { callback_manager_->Wait(); }
......
......@@ -274,7 +274,7 @@ NPUDeviceContext::NPUDeviceContext(NPUPlace place) : place_(place) {
// NOTE(zhiqiu): Usually, no need to create context explicitly,
// ACL creates a default context which contains 1 default stream
// and 1 sync strean after aclrtSetDevice.
PADDLE_ENFORCE_NPU_SUCCESS(aclrtGetCurrentContext(&context_));
platform::GetCurrentNPUContext(&context_);
stream_.reset(new stream::NPUStream(place));
}
......
......@@ -60,7 +60,8 @@ namespace mkldnn = dnnl;
#include "paddle/fluid/platform/stream/cuda_stream.h"
#endif
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/platform/stream/npu_stream.h"
#include "paddle/fluid/platform/device/npu/enforce_npu.h"
#include "paddle/fluid/platform/device/npu/npu_stream.h"
#endif
#include "unsupported/Eigen/CXX11/Tensor"
......@@ -80,7 +81,7 @@ struct GpuDevice;
#ifdef PADDLE_WITH_ASCEND_CL
#include "acl/acl.h"
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#endif
namespace paddle {
......
......@@ -19,11 +19,9 @@ limitations under the License. */
#include "paddle/fluid/platform/place.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/gpu_info.h"
#elif defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/npu_info.h"
#endif
#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#endif
namespace paddle {
......
......@@ -37,7 +37,7 @@ if(WITH_ROCM)
hip_library(dynload_cuda SRCS ${HIP_SRCS} DEPS dynamic_loader)
cc_library(dynload_warpctc SRCS warpctc.cc DEPS dynamic_loader warpctc)
elseif (WITH_ASCEND_CL)
cc_library(dynload_warpctc SRCS warpctc.cc hccl.cc DEPS dynamic_loader warpctc)
cc_library(dynload_warpctc SRCS warpctc.cc DEPS dynamic_loader warpctc npu_hccl)
else()
nv_library(dynload_cuda SRCS ${CUDA_SRCS} DEPS dynamic_loader)
cc_library(dynload_warpctc SRCS warpctc.cc DEPS dynamic_loader warpctc)
......
......@@ -46,11 +46,6 @@ limitations under the License. */
#include <thrust/system_error.h> // NOLINT
#endif
#ifdef PADDLE_WITH_ASCEND_CL
#include "acl/acl.h"
#include "hccl/hccl_types.h"
#endif // PADDLE_WITH_ASCEND_CL
#include <fstream>
#include <iomanip>
#include <memory>
......@@ -1200,48 +1195,5 @@ inline void retry_sleep(unsigned millisecond) {
#undef DEFINE_EXTERNAL_API_TYPE
#endif // PADDLE_WITH_HIP
#ifdef PADDLE_WITH_ASCEND_CL
namespace details {
template <typename T>
struct NPUStatusType {};
#define DEFINE_NPU_STATUS_TYPE(type, success_value) \
template <> \
struct NPUStatusType<type> { \
using Type = type; \
static constexpr Type kSuccess = success_value; \
}
DEFINE_NPU_STATUS_TYPE(aclError, ACL_ERROR_NONE);
DEFINE_NPU_STATUS_TYPE(HcclResult, HCCL_SUCCESS);
} // namespace details
inline std::string build_npu_error_msg(aclError stat) {
std::ostringstream sout;
sout << " ACL error, the error code is : " << stat << ". ";
return sout.str();
}
inline std::string build_npu_error_msg(HcclResult stat) {
std::ostringstream sout;
sout << " HCCL error, the error code is : " << stat << ". ";
return sout.str();
}
#define PADDLE_ENFORCE_NPU_SUCCESS(COND) \
do { \
auto __cond__ = (COND); \
using __NPU_STATUS_TYPE__ = decltype(__cond__); \
constexpr auto __success_type__ = \
::paddle::platform::details::NPUStatusType< \
__NPU_STATUS_TYPE__>::kSuccess; \
if (UNLIKELY(__cond__ != __success_type__)) { \
auto __summary__ = ::paddle::platform::errors::External( \
::paddle::platform::build_npu_error_msg(__cond__)); \
__THROW_ERROR_INTERNAL__(__summary__); \
} \
} while (0)
#endif // PADDLE_WITH_ASCEND_CL
} // namespace platform
} // namespace paddle
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/string/split.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/cuda_device_guard.h"
......
......@@ -19,6 +19,9 @@ limitations under the License. */
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/variant.h"
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/platform/device/npu/enforce_npu.h"
#endif
namespace paddle {
namespace platform {
......
......@@ -7,7 +7,3 @@ ENDIF()
IF(WITH_GPU OR WITH_ROCM)
cc_library(cuda_stream SRCS cuda_stream.cc DEPS enforce boost ${MKLDNN_CTX_DEPS})
ENDIF()
IF(WITH_ASCEND_CL)
cc_library(npu_stream SRCS npu_stream.cc DEPS enforce boost stream_callback_manager)
ENDIF()
......@@ -13,7 +13,9 @@
// limitations under the License.
#include "paddle/fluid/platform/stream_callback_manager.h"
#include "paddle/fluid/platform/enforce.h"
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/platform/device/npu/npu_info.h"
#endif
namespace paddle {
namespace platform {
......@@ -73,8 +75,7 @@ void StreamCallbackManager<Stream>::AddCallback(
#if PADDLE_WITH_ASCEND_CL
VLOG(3) << "aclrtLaunchCallback at stream: " << stream_;
// TODO(zhiqiu): failed to call aclrtLaunchCallback
PADDLE_ENFORCE_NPU_SUCCESS(aclrtLaunchCallback(StreamCallbackFunc, func,
ACL_CALLBACK_BLOCK, stream_));
NPULaunchCallback(StreamCallbackFunc, func, ACL_CALLBACK_BLOCK, stream_);
#endif
}
......@@ -87,7 +88,7 @@ void StreamCallbackManager<Stream>::Wait() const {
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream_));
#endif
#ifdef PADDLE_WITH_ASCEND_CL
PADDLE_ENFORCE_NPU_SUCCESS(aclrtSynchronizeStream(stream_));
NPUStreamSync(stream_);
#endif
{
std::lock_guard<std::mutex> lock(mtx_);
......
......@@ -32,7 +32,7 @@ limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/fleet/ascend_wrapper.h"
#include "paddle/fluid/platform/ascend_npu_info.h"
#include "paddle/fluid/platform/device/npu/ascend_npu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/ascend_wrapper_py.h"
......
......@@ -120,8 +120,8 @@ limitations under the License. */
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/platform/npu_profiler.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_profiler.h"
#endif
#ifdef PADDLE_WITH_XPU
......
......@@ -15,9 +15,9 @@ limitations under the License. */
#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/npu_info.h"
int main(int argc, char** argv) {
paddle::memory::allocation::UseAllocatorStrategyGFlag();
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册