// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/distributed/collective/ProcessGroupNCCL.h"

#include "paddle/fluid/distributed/collective/Common.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/common/place.h"

DECLARE_bool(nccl_blocking_wait);
DECLARE_bool(use_stream_safe_cuda_allocator);
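// FLAGS_nccl_blocking_wait makes NCCLTask::Wait() spin on the host until the
// collective finishes; FLAGS_use_stream_safe_cuda_allocator requires
// memory::RecordStream() calls so allocations are not reused while the
// communication streams may still be using them.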

constexpr int64_t kWaitBlockTImeout = 10;

namespace paddle {
namespace distributed {

void SyncDefaultStream(
    const std::vector<Place>& places,
    std::vector<EventManager>& ncclEvents,                       // NOLINT
    std::vector<std::unique_ptr<CUDADeviceContext>>& dev_ctx) {  // NOLINT
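  // Record an event on each place's default compute stream and make the
  // corresponding communication context wait on it, so work queued on the
  // NCCL streams is ordered after previously issued compute kernels.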
  for (size_t i = 0; i < places.size(); ++i) {
    auto* default_ctx = static_cast<platform::CUDADeviceContext*>(
        platform::DeviceContextPool::Instance().Get(places[i]));
    ncclEvents[i].Record(*default_ctx);
    ncclEvents[i].Block(*dev_ctx[i]);
  }
}

std::shared_ptr<ProcessGroupNCCL::NCCLTask> ProcessGroupNCCL::CreateTask(
    std::vector<Place> places,
    int rank,
    CommType comm_type,
    const std::vector<phi::DenseTensor>& inputs) {
  return std::make_shared<ProcessGroupNCCL::NCCLTask>(
      places, rank, comm_type, inputs);
}

ProcessGroupNCCL::NCCLTask::NCCLTask(
    const std::vector<Place>& places,
    int rank,
    CommType CommType,
    const std::vector<phi::DenseTensor>& inputs)
    : Task(rank, inputs, CommType), places_(places) {
  control_events_.resize(places.size());
  ncclComms_.resize(places.size());
}

ProcessGroupNCCL::NCCLTask::~NCCLTask() {}

void ProcessGroupNCCL::NCCLTask::SetOutputs(
    std::vector<phi::DenseTensor>& outputs) {  // NOLINT
  outputs_ = std::make_shared<std::vector<phi::DenseTensor>>(outputs);
}

void ProcessGroupNCCL::NCCLTask::SynchronizeStreams() {
  for (size_t i = 0; i < places_.size(); ++i) {
    auto* default_ctx = static_cast<platform::CUDADeviceContext*>(
        platform::DeviceContextPool::Instance().Get(places_[i]));
    default_ctx->WaitEvent(control_events_[i].GetRawCudaEvent());
  }
}

bool ProcessGroupNCCL::NCCLTask::IsCompleted() {
  for (size_t i = 0; i < places_.size(); ++i) {
    if (!control_events_[i].Query()) {
      return false;
    }
  }

  return true;
}

// TODO(shenliang03): Add timeout support for Wait; the timeout is currently unused.
bool ProcessGroupNCCL::NCCLTask::Wait(std::chrono::milliseconds timeout) {
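  // Make the default compute streams wait on the per-place completion events;
  // with FLAGS_nccl_blocking_wait, additionally spin on the host until the
  // events report completion.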
  SynchronizeStreams();
  if (FLAGS_nccl_blocking_wait) {
    // NOTE(shenliang03): This blocks the host until the work completes.
    while (!IsCompleted()) {
      std::this_thread::sleep_for(std::chrono::milliseconds(kWaitBlockTImeout));
    }
  }

  if (!barrierTensors_.empty()) {
    // If the work is used as a barrier, block the CPU until it completes.
    for (auto& place : places_) {
      platform::CUDADeviceGuard gpuGuard(place);
#ifdef PADDLE_WITH_CUDA
      PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize());
#else
      PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize());
#endif
    }
  }
  return true;
}

// Same as Wait
void ProcessGroupNCCL::NCCLTask::Synchronize() { Wait(kWaitTimeout); }
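
// A minimal usage sketch (illustrative only; the concrete Store, place and
// tensors here are assumptions, not part of this file):
//
//   auto pg = std::make_shared<ProcessGroupNCCL>(store, rank, size,
//                                                platform::CUDAPlace(0), gid);
//   auto task = pg->AllReduce(in_tensors, out_tensors, AllreduceOptions());
//   task->Wait();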

ProcessGroupNCCL::ProcessGroupNCCL(const std::shared_ptr<Store>& store,
                                   int rank,
                                   int size,
                                   const platform::Place& place,
                                   int gid)
    : ProcessGroup(rank, size, place, gid), store_(store) {
  platform::SetDeviceId(place_.device);
}

void ProcessGroupNCCL::BroadcastUniqueNCCLID(
    std::vector<ncclUniqueId>& nccl_ids) {  // NOLINT
  if (rank_ == 0) {
    for (size_t i = 0; i < nccl_ids.size(); i++) {
      auto key = "ProcessGroupNCCL/nccl_ids/" + std::to_string(gid_) + "/" +
                 std::to_string(i);
      auto nccl_id = std::vector<uint8_t>(
          reinterpret_cast<uint8_t*>(&nccl_ids[i]),
          reinterpret_cast<uint8_t*>(&nccl_ids[i]) + NCCL_UNIQUE_ID_BYTES);
      store_->set(key, nccl_id);
    }
  } else {
    for (size_t i = 0; i < nccl_ids.size(); i++) {
      auto key = "ProcessGroupNCCL/nccl_ids/" + std::to_string(gid_) + "/" +
                 std::to_string(i);
      auto ret = store_->get(key);
      std::memcpy(&nccl_ids[i], ret.data(), ret.size());
    }
  }
}

// create NCCLManager cache for places_key
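// Rank 0 generates one ncclUniqueId and publishes it through the Store; every
// rank then builds one NCCLCommManager, CUDADeviceContext and EventManager
// per place and caches them under the places_key.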
void ProcessGroupNCCL::CreateNCCLManagerCache(
    const std::string& places_key, const std::vector<Place>& places) {
  PADDLE_ENFORCE_EQ(places_key.empty(),
                    false,
                    platform::errors::PreconditionNotMet(
                        "Not able to create/get the NCCL Communicator since "
                        "the GPU places are not known"));

  std::vector<std::shared_ptr<NCCLCommManager>> nccl_comms;
  nccl_comms.resize(places.size());

  // using vector just for broadcast
  std::vector<ncclUniqueId> nccl_ids;
  nccl_ids.resize(1);
  auto& nccl_id = nccl_ids.front();

  for (auto& place : places) {
    used_place_ids_.insert(place.GetDeviceId());
  }

  if (rank_ == 0) {
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGetUniqueId(&nccl_id));
  }
  BroadcastUniqueNCCLID(nccl_ids);

  VLOG(3) << "init nccl rank: " << rank_ << ", nranks: " << size_
          << ", place: " << places_key
          << ", nccl uniqueid: " << SerializeNCCLUniqueId(nccl_id);

  std::vector<std::unique_ptr<CUDADeviceContext>> dev_ctx;
  dev_ctx.resize(places.size());

  PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupStart());

  for (size_t i = 0; i < places.size(); ++i) {
    platform::CUDADeviceGuard guard(places[i]);
    nccl_comms[i] = NCCLCommManager::Create(GetSize(), GetRank(), nccl_id);
    dev_ctx[i].reset(new CUDADeviceContext(places[i]));
  }

  PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupEnd());

  std::vector<EventManager> events;
  events.resize(places.size());

  // These caches are used later by sync/wait and the communication launchers
  places_to_events_.emplace(places_key, std::move(events));
  places_to_ncclcomm_.emplace(places_key, std::move(nccl_comms));
  places_to_ctx_.emplace(places_key, std::move(dev_ctx));
}
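
// Collective(): look up (or lazily create) the cached communicators for the
// input places, order the NCCL streams after the default compute streams,
// run `fn` once per device inside an NCCL group, and record completion
// events into the returned task.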

template <typename Fn>
std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::Collective(
    std::vector<phi::DenseTensor>& inputs,
    std::vector<phi::DenseTensor>& outputs,
    Fn fn,
    CommType op_type) {
  const auto places = GetPlaceList(inputs);
  const auto key = GetKeyFromPlaces(places);

  {
    std::lock_guard<std::mutex> lock(mutex_);
    if (places_to_ncclcomm_.find(key) == places_to_ncclcomm_.end()) {
      CreateNCCLManagerCache(key, places);
    }
  }

  auto& nccl_comms = places_to_ncclcomm_[key];

  SyncDefaultStream(places, places_to_events_[key], places_to_ctx_[key]);

  auto task = CreateTask(places, rank_, op_type, inputs);
  task->SetOutputs(outputs);

  // construct an uninitialized guard for the device
  platform::CUDADeviceGuard cuda_guard;

  if (FLAGS_use_stream_safe_cuda_allocator) {
    for (size_t i = 0; i < inputs.size(); ++i) {
      cuda_guard.SetDevice(places[i]);
      memory::RecordStream(inputs[i].Holder(),
                           places_to_ctx_[key][i]->stream());
    }
  }

  {
    platform::NCCLGroupGuard nccl_guard;
    for (size_t i = 0; i < inputs.size(); ++i) {
      cuda_guard.SetDevice(places[i]);
      const auto& nccl_stream = places_to_ctx_[key][i]->stream();
      fn(inputs[i], outputs[i], nccl_comms[i]->GetNcclComm(), nccl_stream);
    }
  }

  for (size_t i = 0; i < inputs.size(); ++i) {
    cuda_guard.SetDevice(places[i]);
    task->control_events_[i].Record(*places_to_ctx_[key][i]);
  }
  return task;
}

template <typename Fn>
void ProcessGroupNCCL::Collective(const phi::DenseTensor* in,
                                  phi::DenseTensor* out,
                                  Fn fn,
                                  CommType op_type) {
  std::vector<Place> places;
  places.push_back(in->place());
  const auto key = GetKeyFromPlaces(places);

  {
    std::lock_guard<std::mutex> lock(mutex_);
    if (places_to_ncclcomm_.find(key) == places_to_ncclcomm_.end()) {
      CreateNCCLManagerCache(key, places);
    }
  }

  auto& nccl_comms = places_to_ncclcomm_[key];

  SyncDefaultStream(places, places_to_events_[key], places_to_ctx_[key]);

  // construct an uninitialized guard for the device
  platform::CUDADeviceGuard cuda_guard;

  if (FLAGS_use_stream_safe_cuda_allocator) {
    cuda_guard.SetDevice(places[0]);
    memory::RecordStream(in->Holder(), places_to_ctx_[key][0]->stream());
  }

  {
    platform::NCCLGroupGuard nccl_guard;
    cuda_guard.SetDevice(places[0]);
    const auto& nccl_stream = places_to_ctx_[key][0]->stream();
    fn(in, out, nccl_comms[0]->GetNcclComm(), nccl_stream);
  }

  cuda_guard.SetDevice(places[0]);
}
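
// PointToPoint(): same communicator lookup and stream synchronization as
// Collective(), but `fn` issues a single ncclSend/ncclRecv per device using
// the given peer rank.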

template <typename Fn>
std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::PointToPoint(
    std::vector<phi::DenseTensor>& tensors,
    Fn fn,
    int dst_rank,
    CommType op_type) {
  const auto places = GetPlaceList(tensors);
  const auto key = GetKeyFromPlaces(places);

  {
    std::lock_guard<std::mutex> lock(mutex_);
    if (places_to_ncclcomm_.find(key) == places_to_ncclcomm_.end()) {
      CreateNCCLManagerCache(key, places);
    }
  }

  auto& nccl_comms = places_to_ncclcomm_[key];

  SyncDefaultStream(places, places_to_events_[key], places_to_ctx_[key]);

  auto task = CreateTask(places, rank_, op_type, tensors);

  // construct an uninitialized guard for the device
  platform::CUDADeviceGuard cuda_guard;

  if (FLAGS_use_stream_safe_cuda_allocator) {
    for (size_t i = 0; i < tensors.size(); ++i) {
      cuda_guard.SetDevice(places[i]);
      memory::RecordStream(tensors[i].Holder(),
                           places_to_ctx_[key][i]->stream());
    }
  }

  {
    platform::NCCLGroupGuard nccl_guard;
    for (size_t i = 0; i < tensors.size(); ++i) {
      cuda_guard.SetDevice(places[i]);
      const auto& nccl_stream = places_to_ctx_[key][i]->stream();
      fn(tensors[i], nccl_comms[i]->GetNcclComm(), nccl_stream, dst_rank);
    }
  }

  for (size_t i = 0; i < tensors.size(); ++i) {
    cuda_guard.SetDevice(places[i]);
    task->control_events_[i].Record(*places_to_ctx_[key][i]);
  }
  return task;
}

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::AllReduce(
    std::vector<phi::DenseTensor>& in_tensors,
    std::vector<phi::DenseTensor>& out_tensors,
    const AllreduceOptions& opts) {
  PADDLE_ENFORCE_EQ(
      CheckTensorsInCudaPlace(in_tensors),
      true,
      platform::errors::InvalidArgument("All inputs should be in CudaPlace."));
  return Collective(
      in_tensors,
      out_tensors,
      [&](const phi::DenseTensor& input,
          phi::DenseTensor& output,
          ncclComm_t comm,
          const gpuStream_t& stream) {
        return platform::dynload::ncclAllReduce(
            input.data(),
            output.data(),
            input.numel(),
            platform::ToNCCLDataType(input.type()),
            ToNCCLRedType(opts.reduce_op),
            comm,
            stream);
      },
      CommType::ALLREDUCE);
}
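
// For Broadcast, the effective root is a global rank derived from the source
// process and the tensor index on that process:
//   root = opts.source_rank * in_tensors.size() + opts.source_root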

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::Broadcast(
    std::vector<phi::DenseTensor>& in_tensors,
    std::vector<phi::DenseTensor>& out_tensors,
    const BroadcastOptions& opts) {
  PADDLE_ENFORCE_EQ(
      CheckTensorsInCudaPlace(in_tensors),
      true,
      platform::errors::InvalidArgument("All inputs should be in CudaPlace."));

  return Collective(
      in_tensors,
      out_tensors,
      [&](phi::DenseTensor& input,
          phi::DenseTensor& output,
          ncclComm_t comm,
          const gpuStream_t& stream) {
        const auto root =
            opts.source_rank * in_tensors.size() + opts.source_root;
        return platform::dynload::ncclBroadcast(
            input.data(),
            output.data(),
            input.numel(),
            platform::ToNCCLDataType(input.type()),
            root,
            comm,
            stream);
      },
      CommType::BROADCAST);
}
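
// Barrier is implemented as an allreduce on a one-element tensor per place;
// keeping the tensors alive in barrierTensors_ makes NCCLTask::Wait() also
// block the host with a device synchronize.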

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::Barrier(
    const BarrierOptions& opts) {
  // Only single-card, single-process is supported
  std::vector<phi::GPUPlace> places = {place_};

  std::vector<phi::DenseTensor> barrierTensors;
  barrierTensors.reserve(places.size());

  platform::CUDADeviceGuard gpuGuard;
  for (auto& place : places) {
    gpuGuard.SetDeviceIndex(place.GetDeviceId());
    auto dt = full({1}, 0, phi::DataType::FLOAT32, place);
    barrierTensors.push_back(
        *std::dynamic_pointer_cast<phi::DenseTensor>(dt.impl()));
  }
  auto task = ProcessGroupNCCL::AllReduce(barrierTensors, barrierTensors);
  auto nccl_task = dynamic_cast<ProcessGroupNCCL::NCCLTask*>(task.get());
  nccl_task->barrierTensors_ = std::move(barrierTensors);
  return task;
}

void CheckTensorsInDifferentDevices(
    const std::vector<phi::DenseTensor>& tensors, const size_t num_devices) {
  PADDLE_ENFORCE_EQ(
      tensors.size() == 0,
      false,
      platform::errors::InvalidArgument("Tensor list must be nonempty."));
  PADDLE_ENFORCE_LE(
      tensors.size(),
      num_devices,
      platform::errors::InvalidArgument(
          "Tensor list mustn't be larger than the number of available GPUs."));

  std::set<Place> used_devices;

  for (const auto& t : tensors) {
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(t.place()),
                      true,
                      platform::errors::InvalidArgument(
                          "Tensors must be CUDA dense tensors."));

    const auto inserted = used_devices.insert(t.place()).second;
    PADDLE_ENFORCE_EQ(inserted,
                      true,
                      platform::errors::InvalidArgument(
                          "Tensors must be on distinct GPU devices."));
  }
}

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::Send(
    std::vector<phi::DenseTensor>& tensors, int dst_rank) {
  CheckTensorsInDifferentDevices(tensors, static_cast<size_t>(GetSize()));

  auto task = PointToPoint(
      tensors,
      [&](phi::DenseTensor& input,
          ncclComm_t comm,
          const gpuStream_t& stream,
          int dst_rank) {
        return platform::dynload::ncclSend(
            input.data(),
            input.numel(),
            platform::ToNCCLDataType(input.dtype()),
            dst_rank,
            comm,
            stream);
      },
      dst_rank,
      CommType::SEND);
  return task;
}

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::Recv(
    std::vector<phi::DenseTensor>& tensors, int src_rank) {
  CheckTensorsInDifferentDevices(tensors, static_cast<size_t>(GetSize()));

  auto task = PointToPoint(
      tensors,
      [&](phi::DenseTensor& output,
          ncclComm_t comm,
          const gpuStream_t& stream,
          int src_rank) {
        return platform::dynload::ncclRecv(
            output.data(),
            output.numel(),
            platform::ToNCCLDataType(output.dtype()),
            src_rank,
            comm,
            stream);
      },
      src_rank,
      CommType::RECV);
  return task;
}
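
// The *_Partial variants flatten the tensor to 1-D, slice out
// [offset, offset + length), and send/recv only that slice.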

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::Send_Partial(
    phi::DenseTensor& tensors, int dst_rank, int offset, int length) {
  // CheckTensorsInDifferentDevices(tensors, static_cast<size_t>(GetSize()));

  phi::DenseTensor flatten_tensor;
  flatten_tensor.ShareDataWith(tensors).Resize({tensors.numel()});

  phi::DenseTensor shared_input = flatten_tensor.Slice(offset, offset + length);

  std::vector<phi::DenseTensor> shared_tensors;
  shared_tensors.push_back(shared_input);

  auto task = PointToPoint(
      shared_tensors,
      [&](phi::DenseTensor& input,
          ncclComm_t comm,
          const gpuStream_t& stream,
          int dst_rank) {
        return platform::dynload::ncclSend(
            input.data(),
            input.numel(),
            platform::ToNCCLDataType(input.dtype()),
            dst_rank,
            comm,
            stream);
      },
      dst_rank,
      CommType::SEND);
  return task;
}

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::Recv_Partial(
    phi::DenseTensor& tensors, int src_rank, int offset, int length) {
  // phi::DenseTensor shared_input = tensors.Slice(offset, offset+length);

  phi::DenseTensor flatten_tensor;
  flatten_tensor.ShareDataWith(tensors).Resize({tensors.numel()});
  phi::DenseTensor shared_input = flatten_tensor.Slice(offset, offset + length);

  std::vector<phi::DenseTensor> shared_tensors;
  shared_tensors.push_back(shared_input);

  auto task = PointToPoint(
      shared_tensors,
      [&](phi::DenseTensor& output,
          ncclComm_t comm,
          const gpuStream_t& stream,
          int src_rank) {
        return platform::dynload::ncclRecv(
            output.data(),
            output.numel(),
            platform::ToNCCLDataType(output.dtype()),
            src_rank,
            comm,
            stream);
      },
      src_rank,
      CommType::RECV);
  return task;
}

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::AllGather(
    std::vector<phi::DenseTensor>& in_tensors,
    std::vector<phi::DenseTensor>& out_tensors) {
  PADDLE_ENFORCE_EQ(
      CheckTensorsInCudaPlace(in_tensors),
      true,
      platform::errors::InvalidArgument("All inputs should be in CudaPlace."));
  PADDLE_ENFORCE_EQ(
      CheckTensorsInCudaPlace(out_tensors),
      true,
      platform::errors::InvalidArgument("All outputs should be in CudaPlace."));
  return Collective(
      in_tensors,
      out_tensors,
      [&](const phi::DenseTensor& input,
          phi::DenseTensor& output,
          ncclComm_t comm,
          const gpuStream_t& stream) {
        return platform::dynload::ncclAllGather(
            input.data(),
            output.data(),
            input.numel(),
            platform::ToNCCLDataType(input.dtype()),
            comm,
            stream);
      },
      CommType::ALLGATHER);
}
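
// Advance a raw device pointer by `offset` elements of the given data type.
// FLOAT16 is advanced via int16_t* because its element storage is two bytes.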

void* GetPointerByOffset(void* raw_pointer,
                         size_t offset,
                         experimental::DataType type) {
  if (type == experimental::DataType::FLOAT32) {
    return reinterpret_cast<void*>(reinterpret_cast<float*>(raw_pointer) +
                                   offset);
  } else if (type == experimental::DataType::FLOAT64) {
    return reinterpret_cast<void*>(reinterpret_cast<double*>(raw_pointer) +
                                   offset);
  } else if (type == experimental::DataType::INT32) {
    return reinterpret_cast<void*>(reinterpret_cast<int32_t*>(raw_pointer) +
                                   offset);
  } else if (type == experimental::DataType::INT64) {
    return reinterpret_cast<void*>(reinterpret_cast<int64_t*>(raw_pointer) +
                                   offset);
  } else if (type == experimental::DataType::FLOAT16) {
    return reinterpret_cast<void*>(reinterpret_cast<int16_t*>(raw_pointer) +
                                   offset);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "This datatype in nccl is not supported."));
  }
  return nullptr;
}
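
// AllToAll: each rank exchanges chunks of numel / size_ elements with every
// peer, pairing one ncclSend and one ncclRecv per peer inside a single
// ncclGroupStart()/ncclGroupEnd() section.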

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::AllToAll(
    std::vector<phi::DenseTensor>& in_tensors,
    std::vector<phi::DenseTensor>& out_tensors) {
  PADDLE_ENFORCE_EQ(
      CheckTensorsInCudaPlace(in_tensors),
      true,
      platform::errors::InvalidArgument("All inputs should be in CudaPlace."));
  PADDLE_ENFORCE_EQ(
      CheckTensorsInCudaPlace(out_tensors),
      true,
      platform::errors::InvalidArgument("All outputs should be in CudaPlace."));
  return Collective(
      in_tensors,
      out_tensors,
      [&](phi::DenseTensor& input,
          phi::DenseTensor& output,
          ncclComm_t comm,
          const gpuStream_t& stream) {
        size_t offset = 0;
        PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupStart());
        for (auto i = 0; i < size_; i++) {
          PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclSend(
              GetPointerByOffset(input.data(), offset, input.dtype()),
              input.numel() / size_,
              platform::ToNCCLDataType(input.dtype()),
              i,
              comm,
              stream));
          PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRecv(
              GetPointerByOffset(output.data(), offset, input.dtype()),
              input.numel() / size_,
              platform::ToNCCLDataType(input.dtype()),
              i,
              comm,
              stream));
          offset += input.numel() / size_;
        }
        PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupEnd());
      },
      CommType::ALLTOALL);
}

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::Reduce(
    std::vector<phi::DenseTensor>& in_tensors,
    std::vector<phi::DenseTensor>& out_tensors,
    const ReduceOptions& opts) {
  PADDLE_ENFORCE_EQ(
      CheckTensorsInCudaPlace(in_tensors),
      true,
      platform::errors::InvalidArgument("All inputs should be in CudaPlace."));
  return Collective(
      in_tensors,
      out_tensors,
      [&](const phi::DenseTensor& input,
          phi::DenseTensor& output,
          ncclComm_t comm,
          const gpuStream_t& stream) {
        PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclReduce(
            input.data(),
            output.data(),
            input.numel(),
            platform::ToNCCLDataType(input.dtype()),
            ToNCCLRedType(opts.reduce_op),
            opts.root_rank,
            comm,
            stream));
      },
      CommType::REDUCE);
}
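
// Scatter: the root sends the i-th chunk of its input to rank i, and every
// rank (the root included) receives its numel / size_ chunk from the root.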

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::Scatter(
    std::vector<phi::DenseTensor>& in_tensors,
    std::vector<phi::DenseTensor>& out_tensors,
    const ScatterOptions& opts) {
  PADDLE_ENFORCE_EQ(
      CheckTensorsInCudaPlace(in_tensors),
      true,
      platform::errors::InvalidArgument("All inputs should be in CudaPlace."));
  PADDLE_ENFORCE_EQ(
      CheckTensorsInCudaPlace(out_tensors),
      true,
      platform::errors::InvalidArgument("All outputs should be in CudaPlace."));
  return Collective(
      in_tensors,
      out_tensors,
      [&](phi::DenseTensor& input,
          phi::DenseTensor& output,
          ncclComm_t comm,
          const gpuStream_t& stream) {
        size_t offset = 0;
        if (rank_ == opts.root_rank) {
          PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupStart());
          for (auto i = 0; i < size_; i++) {
            PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclSend(
                GetPointerByOffset(input.data(), offset, input.dtype()),
                input.numel() / size_,
                platform::ToNCCLDataType(input.dtype()),
                i,
                comm,
                stream));
            offset += input.numel() / size_;
          }
          PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRecv(
              output.data(),
              input.numel() / size_,
              platform::ToNCCLDataType(input.dtype()),
              opts.root_rank,
              comm,
              stream));
          PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGroupEnd());
        } else {
          PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRecv(
              output.data(),
              input.numel() / size_,
              platform::ToNCCLDataType(input.dtype()),
              opts.root_rank,
              comm,
              stream));
        }
      },
      CommType::SCATTER);
}

}  //  namespace distributed
}  //  namespace paddle