// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
// Copyright (c) 2022 NVIDIA Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/ir/pass_builder.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace framework {
namespace ir {
class Graph;
class PassBuilder;
}  // namespace ir
}  // namespace framework
namespace platform {
class NCCLCommunicator;
}  // namespace platform
}  // namespace paddle

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#elif defined(PADDLE_WITH_XPU) && defined(PADDLE_WITH_XPU_BKCL)
#include "paddle/fluid/platform/device/xpu/bkcl_helper.h"
#endif

namespace paddle {
namespace framework {
namespace details {
using DeviceType = paddle::platform::DeviceType;
namespace p = paddle::platform;

struct BuildStrategy {
C
chengduo 已提交
55 56 57 58 59 60 61 62 63
  // ParallelExecutor supports two modes of ReduceStrategy, kAllReduce and
  // kReduce, for CPU and GPU. If you use kAllReduce, different threads
  // optimize their parameters separately. If you use kReduce, the optimizations
  // of parameters are distributed to different threads.
  // For example, a model has 100 parameters and is running with four threads,
  // if you choose kAllReduce, every thread is to optimize 100 parameters
  // separately, if you choose kReduce, every thread is to optimize 25
  // parameters.
  // Of particular note is, if you use kReduce when using CPU training,
64 65
  // all the parameters are shared between different threads. This
  // feature will save memory.
C
chengduo 已提交
66 67 68 69 70 71 72 73 74
  // FIXME(zcd): The result of the two modes(kAllReduce and kReduce) maybe not
  // equal for GPU. Because, the result of the different order of summing maybe
  // different, for example, the result of `a+b+c+d` may be different with the
  // result of `c+a+b+d`.
  // For GPU, the implementation of kAllReduce and kReduce is adopted NCCL,
  // so the result of kAllReduce and kReduce maybe not equal.
  // For CPU, if you want to fix the order of summing to make the result
  // of kAllReduce and kReduce no diff, you can add
  // `FLAGS_cpu_deterministic=true` to env.
75
  enum class ReduceStrategy { kAllReduce = 0, kReduce = 1, kNoReduce = 2 };
Y
yuyang18 已提交
76 77 78 79

  enum class GradientScaleStrategy {
    kCoeffNumDevice = 0,
    kOne = 1,
C
chengduo 已提交
80 81
    // user can customize gradient scale to use, and just feed
    // it into exe.run().
Y
yuyang18 已提交
82 83 84
    kCustomized = 2,
  };

Y
yuyang18 已提交
85
  ReduceStrategy reduce_{ReduceStrategy::kAllReduce};
Y
yuyang18 已提交
86
  GradientScaleStrategy gradient_scale_{GradientScaleStrategy::kCoeffNumDevice};
Y
yuyang18 已提交
87 88

  std::string debug_graphviz_path_{""};
F
fengjiayi 已提交
89

C
chengduo 已提交
90
  // Add dependency between backward ops and optimization ops, make sure that
91 92
  // all the backward ops are finished before running the optimization
  // ops. It might make the training speed of data parallelism faster.
93
  bool enable_backward_optimizer_op_deps_{true};
C
chengduo 已提交
94 95 96 97 98 99 100 101
  // TODO(dev-paddle): enable_sequential_execution depends on
  // kStaleProgramOpDescs, it is not appropriate, because kStaleProgramOpDescs
  // will be removed in the near future.
  bool enable_sequential_execution_{false};
  bool remove_unnecessary_lock_{true};
  // TODO(dev-paddle): cache_runtime_context may cause some models to hang up
  // while running.
  bool cache_runtime_context_{false};
C
chengduo 已提交
102

Z
Zeng Jinle 已提交
103 104 105
  // Fix the op run order.
  bool fix_op_run_order_{false};

106 107 108
  // Lowering sub-graph into cinn ops.
  bool build_cinn_pass_{false};

C
chengduo 已提交
109 110 111
  // Operator fusion
  // TODO(dev-paddle): fuse_elewise_add_act_ops may cause some models have
  // cycle.
Z
Zhen Wang 已提交
112
  bool fuse_bn_act_ops_{false};
Z
Zhang Ting 已提交
113
  bool fuse_bn_add_act_ops_{true};
114 115
  bool fuse_elewise_add_act_ops_{false};
  bool enable_auto_fusion_{false};
C
chengduo 已提交
116 117
  // Fuse_all_optimizer_ops and fuse_all_reduce_ops require that gradients
  // should not be sparse types
118
  paddle::optional<bool> fuse_all_optimizer_ops_{false};
119
  paddle::optional<bool> fuse_all_reduce_ops_{paddle::none};
C
chengduo 已提交
120 121
  // fuse_relu_depthwise_conv can fuse the `relu ->
  // depthwise_conv`
122
  bool fuse_relu_depthwise_conv_{false};
C
chengduo 已提交
123 124 125 126
  // NOTE(zcd): In reduce mode, fusing broadcast ops may make the program
  // faster. Because fusing broadcast OP equals delaying the execution of all
  // broadcast Ops, in this case, all nccl streams are used only for reduce
  // operations for a period of time.
127
  paddle::optional<bool> fuse_broadcast_ops_{paddle::none};
C
chengduo 已提交
128
  // replace batch_norm with sync_batch_norm.
Q
qingqing01 已提交
129
  bool sync_batch_norm_{false};
130 131
  // Fuse GEMM+Epilogue via cublasLt epilogue.
  bool fuse_gemm_epilogue_{false};
Q
qingqing01 已提交
132

C
chengduo 已提交
133 134 135 136 137 138 139
  // mkldnn_enabled_op_types specify the operator type list to
  // use MKLDNN acceleration. It is null in default, means
  // that all the operators supported by MKLDNN will be
  // accelerated. And it should not be set when
  // FLAGS_use_mkldnn=false
  std::unordered_set<std::string> mkldnn_enabled_op_types_;

140 141 142
  // By default, memory_optimize would be opened if gc is disabled, and
  // be closed if gc is enabled.
  // Users can forcely enable/disable memory_optimize by setting True/False.
143
  paddle::optional<bool> memory_optimize_{paddle::none};
144 145 146 147

  // Turn on inplace by default.
  bool enable_inplace_{true};

148 149 150
  // Turn off inplace addto by default.
  bool enable_addto_{false};

151 152
  bool allow_cuda_graph_capture_{false};

153
  // Inference pass
H
Hui Zhang 已提交
154 155 156 157 158 159 160
  bool enable_inference_pass_{false};  // switch for infernce pass
  bool delete_dropout_{true};          // delte dropout op
#ifdef PADDLE_WITH_MKLDNN
  bool use_mkldnn_{true};  // use mkdnn to do inference
#else
  bool use_mkldnn_{false};  // use mkdnn to do inference
#endif
161

162 163 164 165
  // FIXME(zcd): is_distribution_ is a temporary field, because in pserver mode,
  // num_trainers is 1, so the current fields of build_strategy doesn't tell if
  // it's distributed model.
  bool is_distribution_{false};
Q
can run  
Qiao Longfei 已提交
166
  bool async_mode_{false};
167
  int num_trainers_{1};
168 169
  int trainer_id_{0};
  std::vector<std::string> trainers_endpoints_;
170

C
chengduo 已提交
171
  // NCCL config
172
  size_t nccl_comm_num_{1};
173
  size_t bkcl_comm_num_{1};
174 175 176
  // The picture is here:
  // https://github.com/PaddlePaddle/Paddle/pull/17263#discussion_r285411396
  bool use_hierarchical_allreduce_{false};
T
tianshuo78520a 已提交
177
  // Nccl ranks in a node when use hierarchical allreduce, it's set to gpu
178 179
  // cards' number in most cases.
  size_t hierarchical_allreduce_inter_nranks_{0};
T
tianshuo78520a 已提交
180
  // Nccl ranks bewteen nodes when use hierarchical allreduce, it's set to
181 182 183
  // nodes number.
  size_t hierarchical_allreduce_exter_nranks_{0};

X
Xin Pan 已提交
184 185 186 187 188
  // NOTE:
  // Before you add new options, think if it's a general strategy that works
  // with other strategy. If not, the strategy should be created through
  // CreatePassesFromStrategy and the pass can be managed separately.

X
Xin Pan 已提交
189
  // User normally doesn't need to call this API.
X
Xin Pan 已提交
190
  // The PassBuilder allows for more customized insert, remove of passes
X
Xin Pan 已提交
191 192 193
  // from python side.
  // A new PassBuilder is created based on configs defined above and
  // passes are owned by the PassBuilder.
194
  std::shared_ptr<ir::PassBuilder> CreatePassesFromStrategy(
X
Xin Pan 已提交
195 196 197
      bool finalize_strategy) const;

  bool IsFinalized() const { return is_finalized_; }
198

199 200 201 202 203
  void ClearFinalized() {
    pass_builder_ = nullptr;
    is_finalized_ = false;
  }

204 205
  bool IsMultiDevPass(const std::string &pass_name) const;

X
Xin Pan 已提交
206 207
  // Apply the passes built by the pass_builder_. The passes will be
  // applied to the Program and output an ir::Graph.
208 209
  ir::Graph *Apply(ir::Graph *graph,
                   const std::vector<platform::Place> &places,
210 211 212
                   const std::string &loss_var_name,
                   const std::vector<Scope *> &local_scopes,
                   const size_t &nranks,
213
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
214
                   DeviceType use_device,
215
                   platform::NCCLCommunicator *nccl_ctxs) const;
216 217 218
#elif defined(PADDLE_WITH_XPU) && defined(PADDLE_WITH_XPU_BKCL)
                   DeviceType use_device,
                   platform::BKCLCommunicator *bkcl_ctxs) const;
219
#else
220
                   DeviceType use_device) const;
221 222
#endif

223 224 225 226 227 228 229
  // If set true, ParallelExecutor would build the main_program into multiple
  // graphs,
  // each of the graphs would run with one device. This approach can achieve
  // better performance
  // on some scenarios.
  mutable bool enable_parallel_graph_ = false;

230
 private:
X
Xin Pan 已提交
231
  mutable bool is_finalized_ = false;
232
  mutable std::shared_ptr<ir::PassBuilder> pass_builder_;
Y
yuyang18 已提交
233 234
};
inline std::ostream &operator<<(std::ostream &os,
                                const BuildStrategy &strategy) {
  os << "BuildStrategy: " << &strategy << std::endl;
  os << "reduce_: " << static_cast<int>(strategy.reduce_) << std::endl;
  os << "gradient_scale_: " << static_cast<int>(strategy.gradient_scale_)
     << std::endl;
  os << "debug_graphviz_path_: " << strategy.debug_graphviz_path_ << std::endl;
  os << "enable_backward_optimizer_op_deps_: "
     << strategy.enable_backward_optimizer_op_deps_ << std::endl;
  os << "enable_sequential_execution_: "
     << strategy.enable_sequential_execution_ << std::endl;
  os << "remove_unnecessary_lock_: " << strategy.remove_unnecessary_lock_
     << std::endl;
  os << "cache_runtime_context_: " << strategy.cache_runtime_context_
     << std::endl;
  os << "fix_op_run_order_: " << strategy.fix_op_run_order_ << std::endl;
  os << "fuse_bn_act_ops_: " << strategy.fuse_bn_act_ops_ << std::endl;
  os << "fuse_bn_add_act_ops_: " << strategy.fuse_bn_add_act_ops_ << std::endl;
  os << "fuse_elewise_add_act_ops_: " << strategy.fuse_elewise_add_act_ops_
     << std::endl;
  os << "enable_auto_fusion_: " << strategy.enable_auto_fusion_ << std::endl;
  os << "fuse_all_optimizer_ops_: " << strategy.fuse_all_optimizer_ops_
     << std::endl;
  os << "fuse_all_reduce_ops_: " << strategy.fuse_all_reduce_ops_ << std::endl;
  os << "fuse_relu_depthwise_conv_: " << strategy.fuse_relu_depthwise_conv_
     << std::endl;
  os << "fuse_broadcast_ops_: " << strategy.fuse_broadcast_ops_ << std::endl;
  os << "sync_batch_norm_: " << strategy.sync_batch_norm_ << std::endl;
  os << "fuse_gemm_epilogue_: " << strategy.fuse_gemm_epilogue_ << std::endl;
  os << "mkldnn_enabled_op_types_: ";
  for (auto str : strategy.mkldnn_enabled_op_types_) {
    os << str << ", ";
  }
  os << std::endl;
  os << "memory_optimize_: " << strategy.memory_optimize_ << std::endl;
  os << "enable_inplace_: " << strategy.enable_inplace_ << std::endl;
  os << "allow_cuda_graph_capture_: " << strategy.allow_cuda_graph_capture_
     << std::endl;
  os << "enable_inference_pass_: " << strategy.enable_inference_pass_
     << std::endl;
  os << "delete_dropout_: " << strategy.delete_dropout_ << std::endl;
  os << "use_mkldnn_: " << strategy.use_mkldnn_ << std::endl;
  os << "is_distribution_: " << strategy.is_distribution_ << std::endl;
  os << "async_mode_: " << strategy.async_mode_ << std::endl;
  os << "num_trainers_: " << strategy.num_trainers_ << std::endl;
  os << "trainer_id_: " << strategy.trainer_id_ << std::endl;
  os << "trainers_endpoints_: ";
  for (auto str : strategy.trainers_endpoints_) {
    os << str << ", ";
  }
  os << std::endl;
  os << "nccl_comm_num_: " << strategy.nccl_comm_num_ << std::endl;
  os << "bkcl_comm_num_: " << strategy.bkcl_comm_num_ << std::endl;
  os << "use_hierarchical_allreduce_: " << strategy.use_hierarchical_allreduce_
     << std::endl;
  os << "hierarchical_allreduce_inter_nranks_: "
     << strategy.hierarchical_allreduce_inter_nranks_ << std::endl;
  os << "enable_parallel_graph_: " << strategy.enable_parallel_graph_
     << std::endl;
  return os;
}
}  // namespace details
}  // namespace framework
}  // namespace paddle