/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef _WIN32
#include <unistd.h>
#endif

#include <cstdlib>
#include <cstring>
#include <iostream>
#include <string>
#include <thread>  // NOLINT
#include <vector>

#include "gtest/gtest.h"

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/operators/dropout_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/string/printf.h"

#include "paddle/fluid/operators/collective/c_broadcast_op.h"
#include "paddle/fluid/operators/collective/gen_hccl_id_op_helper.h"

#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif

namespace f = paddle::framework;
namespace p = paddle::platform;
namespace m = paddle::operators::math;

USE_OP(c_broadcast);
USE_OP_DEVICE_KERNEL(c_sync_comm_stream, NPU);
USE_NO_KERNEL_OP(c_gen_hccl_id);
USE_NO_KERNEL_OP(c_comm_init_hccl);
USE_OP_DEVICE_KERNEL(c_broadcast, NPU);

DECLARE_string(selected_npus);

// Joins a vector into a comma-separated string and logs it at VLOG level 2.
template <typename T>
void PrintDebugInfo(const std::string& preStr, const std::vector<T>& data) {
  std::string debugstring = "";
  for (auto ele : data) {
    debugstring += std::to_string(ele) + std::string(",");
  }
  VLOG(2) << preStr << ":" << std::endl << debugstring;
}

// Runs the c_gen_hccl_id op on this process: rank 0 generates the HCCL root
// info and sends it to the other endpoint; rank 1 receives it.
void PrepareUniqueId(f::Scope* scope, const p::DeviceContext& ctx,
                     HcclRootInfo* hccl_id) {
  int rank_id = atoi(getenv("RANK_ID"));
  int device_id = atoi(getenv("DEVICE_ID"));

  VLOG(2) << "rank_id = " << rank_id << "; device_id = " << device_id;

  f::AttributeMap gen_hccl_id;

  std::vector<std::string> endpointList = {"127.0.0.1:6175", "127.0.0.1:6177"};
  gen_hccl_id["rank"] = rank_id;
  gen_hccl_id["endpoint"] = endpointList[rank_id];
  std::vector<std::string> other_endpoints = {
      endpointList[rank_id == 0 ? 1 : 0]};
  gen_hccl_id["other_endpoints"] = other_endpoints;

  auto out = scope->Var("Out");
  auto id = out->GetMutable<HcclRootInfo>();

  auto comm_init_op = f::OpRegistry::CreateOp("c_gen_hccl_id", {},
                                              {{"Out", {"Out"}}}, gen_hccl_id);
  auto place = ctx.GetPlace();
  comm_init_op->Run(*scope, place);
  ctx.Wait();

  // Copy the whole root-info struct rather than a hardcoded byte count.
  memcpy(hccl_id, id, sizeof(HcclRootInfo));
}
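// Copies the root info obtained above into the scope and runs
// c_comm_init_hccl to create the HCCL communicator for this rank
// (ring 0, world size 2) on the selected NPU device.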
void Prepare(f::Scope* scope, const p::DeviceContext& ctx,
             HcclRootInfo* hccl_id) {
  auto x = scope->Var("X");
  auto id = x->GetMutable<HcclRootInfo>();

  memcpy(id, hccl_id, sizeof(HcclRootInfo));

  int rank_id = atoi(getenv("RANK_ID"));
  int device_id = atoi(getenv("DEVICE_ID"));

  VLOG(2) << "rank_id = " << rank_id << "; device_id = " << device_id;

  f::AttributeMap comm_init_attrs;
  comm_init_attrs["ring_id"] = 0;
  comm_init_attrs["rank_ids"] = 2;  // world size
  comm_init_attrs["rank"] = rank_id;
  comm_init_attrs["device_id"] = device_id;
  auto comm_init_op = f::OpRegistry::CreateOp(
      "c_comm_init_hccl", {{"X", {"X"}}}, {}, comm_init_attrs);
  auto place = ctx.GetPlace();
  comm_init_op->Run(*scope, place);
  ctx.Wait();
}

// Broadcasts a 2x2 tensor from rank 0 and synchronizes the communication
// stream with c_sync_comm_stream before reading the result back.
void TestHCCLBroadcastOp(f::Scope* scope, const p::DeviceContext& ctx) {
  std::cout << "BEGIN TEST:" << __FUNCTION__ << std::endl;

  // init: each rank fills its input with (1.0 + rank_id)
  auto x = scope->Var("Data");
  auto tensor_x = x->GetMutable<f::LoDTensor>();
  int num = 2;
  std::vector<float> init;
  int rank_id = atoi(getenv("RANK_ID"));
  std::cout << "rank_id:" << rank_id << std::endl;
  for (int64_t i = 0; i < num * num; ++i) {
    init.push_back(1.0 + rank_id);
  }
  PrintDebugInfo("input data", init);

  TensorFromVector(init, ctx, tensor_x);
  tensor_x->Resize({num, num});
  ctx.Wait();

  auto place = ctx.GetPlace();
  auto out = scope->Var("OutData");
  auto tensor_out = out->GetMutable<f::LoDTensor>();
  tensor_out->Resize({num, num});
  tensor_out->mutable_data<float>(place);  // allocate
  ctx.Wait();

  // run the broadcast from root rank 0 on ring 0
  f::AttributeMap attrs;
  attrs["tag"] = std::string("tagx");
  attrs["root"] = 0;
  attrs["ring_id"] = 0;

  auto op = f::OpRegistry::CreateOp("c_broadcast", {{"X", {"Data"}}},
                                    {{"Out", {"OutData"}}}, attrs);
  op->Run(*scope, place);

  // comm sync: c_sync_comm_stream is the op under test; it replaces an
  // explicit ctx.Wait() on the communication stream.
  auto sync_op = f::OpRegistry::CreateOp(
      "c_sync_comm_stream", {{"X", {"Data"}}}, {{"Out", {"OutData"}}}, attrs);
  sync_op->Run(*scope, place);

  std::vector<float> out_vec;
  TensorToVector(*tensor_out, ctx, &out_vec);
  PrintDebugInfo("output data", out_vec);

  // every rank should now hold rank 0's data (all ones)
  EXPECT_EQ(out_vec.size(), init.size());
  for (uint32_t i = 0; i < out_vec.size(); i++) {
    EXPECT_EQ(out_vec[i], 1.0);
  }
}

TEST(c_sync_comm_stream_op, NPU) {
  f::Scope scope;
  HcclRootInfo hccl_id;

  // only one device per process is supported; FLAGS_selected_npus picks it
  p::NPUDeviceContext ctx(p::NPUPlace(atoi(FLAGS_selected_npus.c_str())));

  PrepareUniqueId(&scope, ctx, &hccl_id);
  Prepare(&scope, ctx, &hccl_id);
  TestHCCLBroadcastOp(&scope, ctx);
}
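// Note: this test assumes a two-process launch, with each process setting
// RANK_ID (0 or 1) and DEVICE_ID in its environment and selecting its NPU
// via the selected_npus gflag; the two ranks rendezvous on 127.0.0.1:6175
// and 127.0.0.1:6177 to exchange the HCCL root info.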