/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/memory/memory.h"

#if defined(PADDLE_WITH_NCCL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#if defined(PADDLE_WITH_GLOO)
#include <gloo/allreduce.h>
#include "paddle/fluid/framework/fleet/gloo_wrapper.h"
#endif

#if defined(PADDLE_WITH_ASCEND_CL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/hccl_helper.h"
#endif

namespace paddle {
namespace operators {

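// Reduction kinds supported by c_allreduce; each device kernel below maps
// them onto the corresponding Gloo/HCCL/NCCL reduction operation.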
enum ReduceType { kRedSum, kRedMax, kRedMin, kRedProd };

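// Operator definition shared by all c_allreduce_* ops: Out has the same shape
// as X, and the kernel type follows the data type of X.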
class CAllReduceOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
  }
};

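// CPU kernel: runs the allreduce through Gloo. The Gloo environment must be
// initialized beforehand (see GlooWrapper), and the kernel is only available
// when Paddle is built with WITH_GLOO=ON.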
template <ReduceType red_type, typename T>
class CAllReduceOpCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
#if defined(PADDLE_WITH_GLOO)
    auto in = ctx.Input<framework::Tensor>("X");
    auto out = ctx.Output<framework::Tensor>("Out");

    auto place = ctx.GetPlace();
    int64_t send_numel = in->numel();
    const T* send_buff = in->data<T>();
    T* recv_buff = out->mutable_data<T>(in->dims(), place);
    auto gloo = paddle::framework::GlooWrapper::GetInstance();
    PADDLE_ENFORCE_EQ(
        gloo->IsInitialized(), true,
        platform::errors::PreconditionNotMet(
            "You must initialize the gloo environment first to use it."));
    gloo::AllreduceOptions opts(gloo->GetContext());
    opts.setInput(const_cast<T*>(send_buff), send_numel);
    opts.setOutput(recv_buff, send_numel);
    switch (red_type) {
      case kRedSum:
        opts.setReduceFunction(
            static_cast<void (*)(void*, const void*, const void*, size_t)>(
                &gloo::sum<T>));
        break;
      case kRedMax:
        opts.setReduceFunction(
            static_cast<void (*)(void*, const void*, const void*, size_t)>(
                &gloo::max<T>));
        break;
      case kRedMin:
        opts.setReduceFunction(
            static_cast<void (*)(void*, const void*, const void*, size_t)>(
                &gloo::min<T>));
        break;
      case kRedProd:
        opts.setReduceFunction(
            static_cast<void (*)(void*, const void*, const void*, size_t)>(
                &gloo::product<T>));
        break;
      default:
        PADDLE_ENFORCE_EQ(true, false,
                          platform::errors::InvalidArgument(
                              "Invalid reduce type: %d.", red_type));
    }
    gloo::allreduce(opts);
#else
    PADDLE_THROW(platform::errors::Unavailable(
        "PaddlePaddle should compile with GLOO by setting WITH_GLOO=ON"));
#endif
  }
};

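// Ascend NPU kernel: runs the allreduce through HCCL. The communicator is
// looked up by ring_id, and the call is issued either on the calculation
// stream or on the communicator's stream, depending on use_calc_stream.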
template <ReduceType red_type, typename T>
class CAllReduceOpASCENDKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
#if defined(PADDLE_WITH_ASCEND_CL)
    auto in = ctx.Input<framework::LoDTensor>("X");
    auto out = ctx.Output<framework::LoDTensor>("Out");
    auto place = ctx.GetPlace();
    HcclDataType dtype = platform::ToHCCLDataType(in->type());
    int64_t numel = in->numel();

    void* sendbuff = reinterpret_cast<void*>(const_cast<T*>(in->data<T>()));
    // Allocate the output before taking its buffer; otherwise recvbuff is
    // only valid when X and Out share the same variable (in-place use).
    out->mutable_data<T>(in->dims(), place);
    void* recvbuff = reinterpret_cast<void*>(out->data<T>());

    int ring_id = ctx.Attr<int>("ring_id");
    std::string group =
        std::string(HCOM_GROUP_PREFIX) + std::to_string(ring_id);
    auto comm =
        paddle::platform::HCCLCommContext::Instance().Get(ring_id, place);

    aclrtStream stream = nullptr;
    auto dev_ctx = platform::DeviceContextPool::Instance().Get(place);
    if (ctx.Attr<bool>("use_calc_stream")) {
      stream = static_cast<platform::NPUDeviceContext*>(dev_ctx)->stream();
    } else {
      stream = comm->stream();
    }

    HcclReduceOp hccl_red_type = HCCL_REDUCE_SUM;
    switch (red_type) {
      case kRedSum:
        hccl_red_type = HCCL_REDUCE_SUM;
        break;

      case kRedMax:
        hccl_red_type = HCCL_REDUCE_MAX;
        break;

      case kRedMin:
        hccl_red_type = HCCL_REDUCE_MIN;
        break;

      case kRedProd:
        hccl_red_type = HCCL_REDUCE_PROD;
        break;

      default:
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Invalid reduce type: %d", red_type));
    }

    VLOG(3) << "begin hccl allreduce, parameter is: "
            << "input num: " << numel << ", dtype: " << dtype
            << ", hccl_red_type: " << hccl_red_type
            << ", group is: " << group;

    PADDLE_ENFORCE_NPU_SUCCESS(platform::dynload::HcclAllReduce(
        sendbuff, recvbuff, numel, dtype, hccl_red_type, comm->comm(),
        reinterpret_cast<void*>(stream)));

    out->Resize(in->dims());
#else
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "PaddlePaddle should compile with NPU."));
#endif
  }
};

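// CUDA kernel: runs the allreduce through NCCL, with the same
// use_calc_stream based stream selection as the NPU kernel.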
template <ReduceType red_type, typename T>
class CAllReduceOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
#if defined(PADDLE_WITH_NCCL)
    auto in = ctx.Input<framework::Tensor>("X");
    auto out = ctx.Output<framework::Tensor>("Out");

    auto place = ctx.GetPlace();
    ncclDataType_t dtype = platform::ToNCCLDataType(in->type());
    int64_t numel = in->numel();
    const void* sendbuff = in->data<void>();
    out->Resize(in->dims());
    void* recvbuff = out->mutable_data<T>(place);

    int rid = ctx.Attr<int>("ring_id");
    auto comm = platform::NCCLCommContext::Instance().Get(rid, place);

    cudaStream_t stream = nullptr;
    if (ctx.Attr<bool>("use_calc_stream")) {
      auto dev_ctx = platform::DeviceContextPool::Instance().Get(place);
      stream = static_cast<platform::CUDADeviceContext*>(dev_ctx)->stream();
    } else {
      stream = comm->stream();
    }

    ncclRedOp_t nccl_red_type = ncclSum;
    switch (red_type) {
      case kRedSum:
        nccl_red_type = ncclSum;
        break;

      case kRedMax:
        nccl_red_type = ncclMax;
        break;

      case kRedMin:
        nccl_red_type = ncclMin;
        break;

      case kRedProd:
        nccl_red_type = ncclProd;
        break;

      default:
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Invalid reduce type: %d", red_type));
    }

    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce(
        sendbuff, recvbuff, numel, dtype, nccl_red_type, comm->comm(),
        stream));
#else
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "PaddlePaddle should compile with GPU."));
#endif
  }
};

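// Proto maker shared by the concrete c_allreduce_* ops; subclasses only
// provide the reduction name via GetName().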
class CAllReduceOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() {
    AddInput("X", "(Tensor), tensor to be allreduced.");
    AddOutput("Out", "(Tensor) the allreduced result.");
    AddAttr<int>("ring_id", "(int default 0) communication ring id.")
        .SetDefault(0);
#if defined(PADDLE_WITH_ASCEND_CL)
    AddAttr<std::string>("tag", "(string default tag) tag for all reduce.")
        .SetDefault("tag");
#endif
    AddAttr<bool>(
        "use_calc_stream",
        "(bool default false) whether to run the allreduce on the calculation "
        "stream instead of the communicator's stream.")
        .SetDefault(false);
    AddComment(string::Sprintf(R"DOC(
CAllReduce %s Operator

Call collective AllReduce with reduce type %s. If input and output are
the same variable, in-place allreduce will be used.
Reference: https://docs.nvidia.com/deeplearning/sdk/nccl-developer-guide/docs/usage/operations.html#allreduce
)DOC",
                               GetName(), GetName()));
  }

 protected:
  virtual std::string GetName() const = 0;
};

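// A minimal sketch of how a concrete op is expected to reuse these templates.
// The real registration lives in the per-op .cc files (e.g.
// c_allreduce_sum_op.cc); the macro arguments below are illustrative only:
//
//   class CAllReduceSumOpMaker : public CAllReduceOpMaker {
//    protected:
//     std::string GetName() const override { return "Sum"; }
//   };
//
//   namespace ops = paddle::operators;
//   REGISTER_OP_WITHOUT_GRADIENT(c_allreduce_sum, ops::CAllReduceOp,
//                                ops::CAllReduceSumOpMaker);
//   REGISTER_OP_CPU_KERNEL(c_allreduce_sum,
//                          ops::CAllReduceOpCPUKernel<ops::kRedSum, float>);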
}  // namespace operators
}  // namespace paddle