//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
#include <algorithm>
#include "paddle/fluid/framework/details/container_cast.h"
#include "paddle/fluid/framework/details/reduce_and_gather.h"
#include "paddle/fluid/framework/details/variable_visitor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/profiler.h"

// Asynchronous vs. synchronous NCCL allreduce issue:
// https://github.com/PaddlePaddle/Paddle/issues/15049
DEFINE_bool(
    sync_nccl_allreduce, true,
    "If set true, will call `cudaStreamSynchronize(nccl_stream)`"
    "after allreduce, this mode can get better performance in some scenarios.");

namespace paddle {
namespace framework {
namespace details {

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
AllReduceOpHandle::AllReduceOpHandle(ir::Node *node,
                                     const std::vector<Scope *> &local_scopes,
                                     const std::vector<platform::Place> &places,
                                     const platform::NCCLContextMap *ctxs)
    : OpHandleBase(node),
      local_scopes_(local_scopes),
      places_(places),
      nccl_ctxs_(ctxs) {
  if (nccl_ctxs_) {
    for (auto &p : places_) {
      this->SetDeviceContext(p, nccl_ctxs_->DevCtx(p));
    }
  }
}
#else
AllReduceOpHandle::AllReduceOpHandle(ir::Node *node,
                                     const std::vector<Scope *> &local_scopes,
                                     const std::vector<platform::Place> &places)
    : OpHandleBase(node), local_scopes_(local_scopes), places_(places) {}
#endif

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
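// Issues the collected per-device NCCL allreduce calls. A single call is
// launched directly; multiple calls are wrapped in an NCCLGroupGuard so that
// they are submitted as one NCCL group. When FLAGS_sync_nccl_allreduce is
// set, each device's NCCL stream is synchronized afterwards and any pending
// CUDA error is reported via LOG(FATAL).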
void AllReduceOpHandle::RunAllReduceFuncs(
    const std::vector<std::function<void()>> &all_reduce_calls) {
  this->RunAndRecordEvent([&] {
    if (all_reduce_calls.size() == 1UL) {
      // Do not use NCCLGroup when NCCL is managed with one thread per device.
      all_reduce_calls[0]();
    } else {
      platform::NCCLGroupGuard guard;
      for (auto &call : all_reduce_calls) {
        call();
      }
    }
  });

  if (FLAGS_sync_nccl_allreduce) {
    for (auto &p : places_) {
      int dev_id = boost::get<platform::CUDAPlace>(p).device;
      auto &nccl_ctx = nccl_ctxs_->at(dev_id);
      auto stream = nccl_ctx.stream();
      cudaError_t e_sync = cudaStreamSynchronize(stream);
      if (e_sync != cudaSuccess) {
        LOG(FATAL) << "cudaStreamSynchronize " << cudaGetErrorString(e_sync);
      }

      cudaError_t e_get = cudaGetLastError();
      if (e_get != cudaSuccess) {
        LOG(FATAL) << "cudaGetLastError " << cudaGetErrorString(e_get)
                   << " errno:" << e_get;
      }
    }
  }
}
#endif

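// Runs the allreduce: waits until all input variables are generated, collects
// one LoDTensor per place from the local execution scopes, and then either
// performs an NCCL allreduce across the GPU devices or, for CPU-only
// gradients, reduces everything into the first scope's tensor and copies the
// result back to every other place.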
void AllReduceOpHandle::RunImpl() {
  platform::RecordEvent record_event(Name());

  WaitInputVarGenerated();

  auto in_var_handles = DynamicCast<VarHandle>(this->Inputs());
  auto out_var_handles = DynamicCast<VarHandle>(this->Outputs());
  PADDLE_ENFORCE_EQ(
      in_var_handles.size(), places_.size(),
      "The NoDummyInputSize should be equal to the number of places.");
  PADDLE_ENFORCE_EQ(
      in_var_handles.size(), out_var_handles.size(),
      "The NoDummyInputSize and NoDummyOutputSize should be equal.");

  std::vector<const LoDTensor *> lod_tensors;
  for (size_t i = 0; i < local_scopes_.size(); ++i) {
    auto *s = local_scopes_[i];
    auto &local_scope = *s->FindVar(kLocalExecScopeName)->Get<Scope *>();
    auto &lod_tensor =
        local_scope.FindVar(in_var_handles[i]->name())->Get<LoDTensor>();
    lod_tensors.emplace_back(&lod_tensor);
    VLOG(10) << "place:" << i << ", input_name:" << in_var_handles[i]->name()
             << ", out_name:" << out_var_handles[i]->name();
    PADDLE_ENFORCE_EQ(in_var_handles[i]->name(), out_var_handles[i]->name(),
                      "The name of input and output should be equal.");
  }

  if (platform::is_gpu_place(lod_tensors[0]->place())) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
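    // Build one allreduce call per device. The element type and count are
    // taken from the first tensor and reused for every device; each call uses
    // that device's own NCCL communicator and stream.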
    PADDLE_ENFORCE(nccl_ctxs_, "nccl_ctxs should not be nullptr.");
    int dtype = -1;
    size_t numel = 0;
    std::vector<std::function<void()>> all_reduce_calls;
    for (size_t i = 0; i < local_scopes_.size(); ++i) {
      auto &p = places_[i];
      auto &lod_tensor = *lod_tensors[i];
      void *buffer = const_cast<void *>(lod_tensor.data<void>());

      if (dtype == -1) {
        dtype = platform::ToNCCLDataType(lod_tensor.type());
      }

      if (numel == 0) {
        numel = static_cast<size_t>(lod_tensor.numel());
      }

      int dev_id = boost::get<platform::CUDAPlace>(p).device;
      auto &nccl_ctx = nccl_ctxs_->at(dev_id);
      auto stream = nccl_ctx.stream();
      auto comm = nccl_ctx.comm_;

      VLOG(10) << "before all reduce buffer:" << buffer << ", numel:" << numel
               << ", dev_id:" << dev_id << ", dtype:" << dtype
               << ", place:" << p;

      all_reduce_calls.emplace_back([=] {
        PADDLE_ENFORCE(platform::dynload::ncclAllReduce(
            buffer, buffer, numel, static_cast<ncclDataType_t>(dtype), ncclSum,
            comm, stream));
      });
    }
    RunAllReduceFuncs(all_reduce_calls);
#else
    PADDLE_THROW("Not compiled with CUDA");
#endif
  } else {  // Special-case the gradients of CPU-only operators, e.g. CRF.
    auto &trg = *this->local_scopes_[0]
                     ->FindVar(kLocalExecScopeName)
                     ->Get<Scope *>()
                     ->FindVar(out_var_handles[0]->name())
                     ->GetMutable<framework::LoDTensor>();

    // Reduce all tensors into trg on the CPU; the result is copied back to
    // every other place in the loop below.
    ReduceLoDTensor func(lod_tensors, &trg);
    VisitDataType(lod_tensors[0]->type(), func);

    for (size_t i = 1; i < local_scopes_.size(); ++i) {
      auto &scope =
          *local_scopes_[i]->FindVar(kLocalExecScopeName)->Get<Scope *>();
      auto &p = places_[i];
      auto *var = scope.FindVar(out_var_handles[i]->name());
      auto *dev_ctx = dev_ctxes_.at(p);

      RunAndRecordEvent(p, [&trg, var, dev_ctx, p] {
        auto &tensor_gpu = *var->GetMutable<framework::LoDTensor>();
        auto &tensor_cpu = trg;
        TensorCopy(tensor_cpu, p, *dev_ctx, &tensor_gpu);
      });
    }
  }
}

std::string AllReduceOpHandle::Name() const { return "all_reduce"; }
}  // namespace details
}  // namespace framework
}  // namespace paddle