all_reduce_op_handle.h
//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <string>
#include <vector>

#include "paddle/fluid/framework/details/op_handle_base.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/scope.h"
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
#include "paddle/fluid/platform/nccl_helper.h"
#endif

namespace paddle {
namespace framework {
namespace details {

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
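// Names of the auxiliary variables used by the DGC (Deep Gradient
// Compression) sparse all-reduce path.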
constexpr char g_dgc_counter_name[] = "__g_dgc_counter__";
constexpr char g_dgc_rampup_begin_step[] = "__g_rampup_begin_step__";
constexpr char g_dgc_encoded[] = "__dgc_encoded__";
constexpr char g_dgc_k[] = "__dgc_k__";
#endif

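// Op handle that all-reduces a variable (typically a gradient) across all
// places, using NCCL when built with CUDA support.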
struct AllReduceOpHandle : public OpHandleBase {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  AllReduceOpHandle(ir::Node *node, const std::vector<Scope *> &local_scopes,
                    const std::vector<platform::Place> &places,
                    const platform::NCCLContextMap *ctxs,
                    bool is_encoded = false, int nranks = -1);
#else
  AllReduceOpHandle(ir::Node *node, const std::vector<Scope *> &local_scopes,
                    const std::vector<platform::Place> &places);
#endif
  std::string Name() const override;

  // Delaying and buffering nccl_all_reduce calls together can significantly
  // improve performance. Returning false disables this behavior.
  bool IsMultiDeviceTransfer() override { return true; }

 protected:
  void RunImpl() override;

 private:
  std::vector<Scope *> local_scopes_;
  std::vector<platform::Place> places_;
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
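  // All-reduce path for DGC-encoded (sparse) gradients.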
  void RunImplEncoded();
  const platform::NCCLContextMap *nccl_ctxs_;
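  // Whether this handle takes the DGC-encoded path, and the number of ranks
  // participating in the all-reduce.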
  bool is_encoded_{false};
  int nranks_{-1};
  int GetKValue(const std::string &grad_name);
#endif
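  // All-reduce path for dense (non-encoded) gradients.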
  void RunImplNormal();
  bool IsEncoded();
};

}  // namespace details
}  // namespace framework
}  // namespace paddle