diff --git a/paddle/fluid/operators/collective/recv_v2_op.cu.cc b/paddle/fluid/operators/collective/recv_v2_op.cu.cc
index a10257b45b300112dca22ffe951193526a9a7eb5..bfa12f911946d4b3eb17c99ce75caba3ba436c64 100644
--- a/paddle/fluid/operators/collective/recv_v2_op.cu.cc
+++ b/paddle/fluid/operators/collective/recv_v2_op.cu.cc
@@ -40,24 +40,24 @@ framework::DDim recv_shape_info(const platform::Place &place,
                           "to send the shape info."));
   }
 
-  phi::DataType shape_dytpe = phi::DataType::INT32;
+  phi::DataType shape_dtype = phi::DataType::INT32;
   ncclDataType_t nccl_dtype =
-      platform::ToNCCLDataType(framework::TransToProtoVarType(shape_dytpe));
+      platform::ToNCCLDataType(framework::TransToProtoVarType(shape_dtype));
 
   // step1: recv the shape size
-  phi::DenseTensor gpu_shape_size_tensor(shape_dytpe);
+  phi::DenseTensor gpu_shape_size_tensor(shape_dtype);
   if (!group) {
     gpu_shape_size_tensor.Resize({1});
-    gpu_shape_size_tensor.mutable_data(place, shape_dytpe);
+    gpu_shape_size_tensor.mutable_data(place, shape_dtype);
     auto *gpu_data = gpu_shape_size_tensor.data<int>();
     PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRecv(
         gpu_data, 1, nccl_dtype, peer, comm->comm(), stream));
   }
 
   // copy the shape size tensor to cpu
-  phi::DenseTensor *cpu_shape_size_tensor = new phi::DenseTensor(shape_dytpe);
+  phi::DenseTensor *cpu_shape_size_tensor = new phi::DenseTensor(shape_dtype);
   cpu_shape_size_tensor->Resize({1});
-  cpu_shape_size_tensor->mutable_data(platform::CPUPlace(), shape_dytpe);
+  cpu_shape_size_tensor->mutable_data(platform::CPUPlace(), shape_dtype);
   if (group) {
     std::vector<phi::DenseTensor> shape_size_tensor;
     shape_size_tensor.emplace_back(*cpu_shape_size_tensor);
@@ -71,19 +71,19 @@ framework::DDim recv_shape_info(const platform::Place &place,
   VLOG(3) << "recv the shape size: " << shape_size << " from peer";
 
   // step2: recv the shape
-  phi::DenseTensor gpu_shape_tensor(shape_dytpe);
+  phi::DenseTensor gpu_shape_tensor(shape_dtype);
   if (!group) {
     gpu_shape_tensor.Resize({shape_size});
-    gpu_shape_tensor.mutable_data(place, shape_dytpe);
+    gpu_shape_tensor.mutable_data(place, shape_dtype);
     auto *gpu_shape_data = gpu_shape_tensor.data<int>();
     PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRecv(
         gpu_shape_data, shape_size, nccl_dtype, peer, comm->comm(), stream));
   }
 
   // copy the shape tensor to cpu
-  phi::DenseTensor *cpu_shape_tensor = new phi::DenseTensor(shape_dytpe);
+  phi::DenseTensor *cpu_shape_tensor = new phi::DenseTensor(shape_dtype);
   cpu_shape_tensor->Resize({shape_size});
-  cpu_shape_tensor->mutable_data(platform::CPUPlace(), shape_dytpe);
+  cpu_shape_tensor->mutable_data(platform::CPUPlace(), shape_dtype);
   if (group) {
     std::vector<phi::DenseTensor> shape_tensor;
     shape_tensor.emplace_back(*cpu_shape_tensor);
diff --git a/paddle/fluid/operators/collective/send_v2_op.cu.cc b/paddle/fluid/operators/collective/send_v2_op.cu.cc
index 7c1ab8ace3404d68c9e419ba45b30633d072fcf7..adea7db0b8088e61583f88c65a3b4f386177b5cd 100644
--- a/paddle/fluid/operators/collective/send_v2_op.cu.cc
+++ b/paddle/fluid/operators/collective/send_v2_op.cu.cc
@@ -39,16 +39,16 @@ void send_shape_info(const phi::DenseTensor& x,
                           "NCCLComm and Stream should be provided if use NCCL "
                           "to send the shape info."));
   }
-  phi::DataType shape_dytpe = phi::DataType::INT32;
+  phi::DataType shape_dtype = phi::DataType::INT32;
   ncclDataType_t nccl_dtype =
-      platform::ToNCCLDataType(framework::TransToProtoVarType(shape_dytpe));
+      platform::ToNCCLDataType(framework::TransToProtoVarType(shape_dtype));
   auto dims = x.dims();
   int shape_size = dims.size();
 
   // step1: send the shape size
-  phi::DenseTensor cpu_shape_size_tensor(shape_dytpe);
+  phi::DenseTensor cpu_shape_size_tensor(shape_dtype);
   cpu_shape_size_tensor.Resize({1});
-  cpu_shape_size_tensor.mutable_data(platform::CPUPlace(), shape_dytpe);
+  cpu_shape_size_tensor.mutable_data(platform::CPUPlace(), shape_dtype);
   auto* cpu_data = cpu_shape_size_tensor.data<int>();
   cpu_data[0] = shape_size;
 
@@ -58,9 +58,9 @@ void send_shape_info(const phi::DenseTensor& x,
     auto shape_size_task = group->Send(shape_size_tensor, peer);
   } else {
     // copy the shape size tensor to gpu and send
-    phi::DenseTensor* gpu_shape_size_tensor = new phi::DenseTensor(shape_dytpe);
+    phi::DenseTensor* gpu_shape_size_tensor = new phi::DenseTensor(shape_dtype);
     gpu_shape_size_tensor->Resize({1});
-    gpu_shape_size_tensor->mutable_data(place, shape_dytpe);
+    gpu_shape_size_tensor->mutable_data(place, shape_dtype);
     framework::TensorCopySync(
         cpu_shape_size_tensor, place, gpu_shape_size_tensor);
     PADDLE_ENFORCE_GPU_SUCCESS(
@@ -74,9 +74,9 @@ void send_shape_info(const phi::DenseTensor& x,
   VLOG(3) << "send the shape size: " << shape_size << " to peer";
 
   // step2: send the shape
-  phi::DenseTensor cpu_shape_tensor(shape_dytpe);
+  phi::DenseTensor cpu_shape_tensor(shape_dtype);
   cpu_shape_tensor.Resize({shape_size});
-  cpu_shape_tensor.mutable_data(platform::CPUPlace(), shape_dytpe);
+  cpu_shape_tensor.mutable_data(platform::CPUPlace(), shape_dtype);
   auto* cpu_shape_data = cpu_shape_tensor.data<int>();
   for (int i = 0; i < shape_size; ++i) {
     cpu_shape_data[i] = dims[i];
@@ -88,9 +88,9 @@ void send_shape_info(const phi::DenseTensor& x,
     auto shape_task = group->Send(shape_tensor, peer);
   } else {
     // copy the shape tensor to gpu and send
-    phi::DenseTensor* gpu_shape_tensor = new phi::DenseTensor(shape_dytpe);
+    phi::DenseTensor* gpu_shape_tensor = new phi::DenseTensor(shape_dtype);
     gpu_shape_tensor->Resize({shape_size});
-    gpu_shape_tensor->mutable_data(place, shape_dytpe);
+    gpu_shape_tensor->mutable_data(place, shape_dtype);
     framework::TensorCopySync(cpu_shape_tensor, place, gpu_shape_tensor);
     PADDLE_ENFORCE_GPU_SUCCESS(
         platform::dynload::ncclSend(gpu_shape_tensor->data<int>(),