// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
// Copyright (c) 2022 NVIDIA Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/ir/pass_builder.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"

// Forward declarations keep this header lightweight; translation units that
// need the full definitions include the corresponding headers themselves.
namespace paddle {
namespace framework {
namespace ir {
class Graph;
class PassBuilder;
}  // namespace ir
}  // namespace framework
namespace platform {
class NCCLCommunicator;
}  // namespace platform
}  // namespace paddle

// The collective-communication helper is only available when the matching
// library is compiled in: NCCL/RCCL for GPU builds, BKCL for XPU builds.
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#elif defined(PADDLE_WITH_XPU) && defined(PADDLE_WITH_XPU_BKCL)
#include "paddle/fluid/platform/device/xpu/bkcl_helper.h"
#endif

Y
yuyang18 已提交
48 49 50
namespace paddle {
namespace framework {
namespace details {
51 52
using DeviceType = paddle::platform::DeviceType;
namespace p = paddle::platform;
Y
yuyang18 已提交
53 54

struct BuildStrategy {
C
chengduo 已提交
55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74
  // ParallelExecutor supports two modes of ReduceStrategy, kAllReduce and
  // kReduce, for CPU and GPU. If you use kAllReduce, different threads
  // optimize their parameters separately. If you use kReduce, the optimizations
  // of parameters are distributed to different threads.
  // For example, a model has 100 parameters and is running with four threads,
  // if you choose kAllReduce, every thread is to optimize 100 parameters
  // separately, if you choose kReduce, every thread is to optimize 25
  // parameters.
  // Of particular note is, if you use kReduce when using CPU training,
  // all the parameters are shared between different threads. This feature will
  // save memory.
  // FIXME(zcd): The result of the two modes(kAllReduce and kReduce) maybe not
  // equal for GPU. Because, the result of the different order of summing maybe
  // different, for example, the result of `a+b+c+d` may be different with the
  // result of `c+a+b+d`.
  // For GPU, the implementation of kAllReduce and kReduce is adopted NCCL,
  // so the result of kAllReduce and kReduce maybe not equal.
  // For CPU, if you want to fix the order of summing to make the result
  // of kAllReduce and kReduce no diff, you can add
  // `FLAGS_cpu_deterministic=true` to env.
75
  enum class ReduceStrategy { kAllReduce = 0, kReduce = 1, kNoReduce = 2 };
Y
yuyang18 已提交
76 77 78 79

  enum class GradientScaleStrategy {
    kCoeffNumDevice = 0,
    kOne = 1,
C
chengduo 已提交
80 81
    // user can customize gradient scale to use, and just feed
    // it into exe.run().
Y
yuyang18 已提交
82 83 84
    kCustomized = 2,
  };

Y
yuyang18 已提交
85
  ReduceStrategy reduce_{ReduceStrategy::kAllReduce};
Y
yuyang18 已提交
86
  GradientScaleStrategy gradient_scale_{GradientScaleStrategy::kCoeffNumDevice};
Y
yuyang18 已提交
87 88

  std::string debug_graphviz_path_{""};
F
fengjiayi 已提交
89

C
chengduo 已提交
90 91 92
  // Add dependency between backward ops and optimization ops, make sure that
  // all the backward ops are finished before running the optimization ops.
  // It might make the training speed of data parallelism faster.
93
  bool enable_backward_optimizer_op_deps_{true};
C
chengduo 已提交
94 95 96 97 98 99 100 101
  // TODO(dev-paddle): enable_sequential_execution depends on
  // kStaleProgramOpDescs, it is not appropriate, because kStaleProgramOpDescs
  // will be removed in the near future.
  bool enable_sequential_execution_{false};
  bool remove_unnecessary_lock_{true};
  // TODO(dev-paddle): cache_runtime_context may cause some models to hang up
  // while running.
  bool cache_runtime_context_{false};
C
chengduo 已提交
102

Z
Zeng Jinle 已提交
103 104 105
  // Fix the op run order.
  bool fix_op_run_order_{false};

C
chengduo 已提交
106 107 108
  // Operator fusion
  // TODO(dev-paddle): fuse_elewise_add_act_ops may cause some models have
  // cycle.
Z
Zhen Wang 已提交
109
  bool fuse_bn_act_ops_{false};
Z
Zhang Ting 已提交
110
  bool fuse_bn_add_act_ops_{true};
111 112
  bool fuse_elewise_add_act_ops_{false};
  bool enable_auto_fusion_{false};
C
chengduo 已提交
113 114
  // Fuse_all_optimizer_ops and fuse_all_reduce_ops require that gradients
  // should not be sparse types
115
  paddle::optional<bool> fuse_all_optimizer_ops_{false};
116
  paddle::optional<bool> fuse_all_reduce_ops_{paddle::none};
C
chengduo 已提交
117 118
  // fuse_relu_depthwise_conv can fuse the `relu ->
  // depthwise_conv`
119
  bool fuse_relu_depthwise_conv_{false};
C
chengduo 已提交
120 121 122 123
  // NOTE(zcd): In reduce mode, fusing broadcast ops may make the program
  // faster. Because fusing broadcast OP equals delaying the execution of all
  // broadcast Ops, in this case, all nccl streams are used only for reduce
  // operations for a period of time.
124
  paddle::optional<bool> fuse_broadcast_ops_{paddle::none};
C
chengduo 已提交
125
  // replace batch_norm with sync_batch_norm.
Q
qingqing01 已提交
126
  bool sync_batch_norm_{false};
127 128
  // Fuse GEMM+Epilogue via cublasLt epilogue.
  bool fuse_gemm_epilogue_{false};
Q
qingqing01 已提交
129

C
chengduo 已提交
130 131 132 133 134 135 136
  // mkldnn_enabled_op_types specify the operator type list to
  // use MKLDNN acceleration. It is null in default, means
  // that all the operators supported by MKLDNN will be
  // accelerated. And it should not be set when
  // FLAGS_use_mkldnn=false
  std::unordered_set<std::string> mkldnn_enabled_op_types_;

137 138 139
  // By default, memory_optimize would be opened if gc is disabled, and
  // be closed if gc is enabled.
  // Users can forcely enable/disable memory_optimize by setting True/False.
140
  paddle::optional<bool> memory_optimize_{paddle::none};
141 142 143 144

  // Turn on inplace by default.
  bool enable_inplace_{true};

145 146 147
  // Turn off inplace addto by default.
  bool enable_addto_{false};

148 149
  bool allow_cuda_graph_capture_{false};

150 151 152 153
  // Inference pass
  bool inference_{false};  // switch for infernce pass
  bool del_dropout_{false};

154 155 156 157
  // FIXME(zcd): is_distribution_ is a temporary field, because in pserver mode,
  // num_trainers is 1, so the current fields of build_strategy doesn't tell if
  // it's distributed model.
  bool is_distribution_{false};
Q
can run  
Qiao Longfei 已提交
158
  bool async_mode_{false};
159
  int num_trainers_{1};
160 161
  int trainer_id_{0};
  std::vector<std::string> trainers_endpoints_;
162

C
chengduo 已提交
163
  // NCCL config
164
  size_t nccl_comm_num_{1};
165
  size_t bkcl_comm_num_{1};
166 167 168
  // The picture is here:
  // https://github.com/PaddlePaddle/Paddle/pull/17263#discussion_r285411396
  bool use_hierarchical_allreduce_{false};
T
tianshuo78520a 已提交
169
  // Nccl ranks in a node when use hierarchical allreduce, it's set to gpu
170 171
  // cards' number in most cases.
  size_t hierarchical_allreduce_inter_nranks_{0};
T
tianshuo78520a 已提交
172
  // Nccl ranks bewteen nodes when use hierarchical allreduce, it's set to
173 174 175
  // nodes number.
  size_t hierarchical_allreduce_exter_nranks_{0};

X
Xin Pan 已提交
176 177 178 179 180
  // NOTE:
  // Before you add new options, think if it's a general strategy that works
  // with other strategy. If not, the strategy should be created through
  // CreatePassesFromStrategy and the pass can be managed separately.

X
Xin Pan 已提交
181
  // User normally doesn't need to call this API.
X
Xin Pan 已提交
182
  // The PassBuilder allows for more customized insert, remove of passes
X
Xin Pan 已提交
183 184 185
  // from python side.
  // A new PassBuilder is created based on configs defined above and
  // passes are owned by the PassBuilder.
186
  std::shared_ptr<ir::PassBuilder> CreatePassesFromStrategy(
X
Xin Pan 已提交
187 188 189
      bool finalize_strategy) const;

  bool IsFinalized() const { return is_finalized_; }
190

191 192 193 194 195
  void ClearFinalized() {
    pass_builder_ = nullptr;
    is_finalized_ = false;
  }

196 197
  bool IsMultiDevPass(const std::string &pass_name) const;

X
Xin Pan 已提交
198 199
  // Apply the passes built by the pass_builder_. The passes will be
  // applied to the Program and output an ir::Graph.
200 201
  ir::Graph *Apply(ir::Graph *graph,
                   const std::vector<platform::Place> &places,
202 203 204
                   const std::string &loss_var_name,
                   const std::vector<Scope *> &local_scopes,
                   const size_t &nranks,
205
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
206
                   DeviceType use_device,
207
                   platform::NCCLCommunicator *nccl_ctxs) const;
208 209 210
#elif defined(PADDLE_WITH_XPU) && defined(PADDLE_WITH_XPU_BKCL)
                   DeviceType use_device,
                   platform::BKCLCommunicator *bkcl_ctxs) const;
211
#else
212
                   DeviceType use_device) const;
213 214
#endif

215 216 217 218 219 220 221
  // If set true, ParallelExecutor would build the main_program into multiple
  // graphs,
  // each of the graphs would run with one device. This approach can achieve
  // better performance
  // on some scenarios.
  mutable bool enable_parallel_graph_ = false;

222
 private:
X
Xin Pan 已提交
223
  mutable bool is_finalized_ = false;
224
  mutable std::shared_ptr<ir::PassBuilder> pass_builder_;
Y
yuyang18 已提交
225 226 227 228 229
};

}  // namespace details
}  // namespace framework
}  // namespace paddle