/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/details/build_strategy.h"

#include <glog/logging.h>
#include <memory>
#include <utility>
#include "paddle/fluid/framework/details/memory_optimize_helper.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/sequential_execution_pass.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/graph_to_program_pass.h"
#include "paddle/fluid/framework/ir/graph_viz_pass.h"

namespace paddle {
namespace framework {
namespace details {

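// Example: with enable_sequential_execution_ = false, num_trainers_ = 2,
// and enable_parallel_graph_ = false, this predicate returns true, and the
// builder later appends all_reduce_deps_pass to fix the all-reduce order.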
static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) {
  // The order of all-reduce ops must be fixed when they are scheduled
  // across multiple threads or processes; otherwise execution may hang.
  // NOTE: ParallelGraph executes this pass on each graph, so there is no
  // need to append it here.
  return (!strategy.enable_sequential_execution_ &&
          strategy.num_trainers_ > 1) &&
         !strategy.enable_parallel_graph_;
}

class ParallelExecutorPassBuilder : public ir::PassBuilder {
 public:
  explicit ParallelExecutorPassBuilder(const BuildStrategy &strategy)
      : ir::PassBuilder(), strategy_(strategy) {
    // Add a graph viz pass to record the original graph.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto viz_pass = AppendPass("graph_viz_pass");
      const std::string graph_path = string::Sprintf(
          "%s%s", strategy_.debug_graphviz_path_.c_str(), "_original_graph");
      viz_pass->Set<std::string>("graph_viz_path", new std::string(graph_path));
    }
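    // For example, with debug_graphviz_path_ = "/tmp/train" (an illustrative
    // value), the pass receives "/tmp/train_original_graph" as its
    // graph_viz_path and records the original graph there.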

    if (strategy_.enable_sequential_execution_) {
      VLOG(10) << "Add sequential_execution_pass";
      AppendPass("sequential_execution_pass");
    }

    // Convert batch_norm ops to sync_batch_norm ops to synchronize batch
    // norm statistics across devices.
    if (strategy.sync_batch_norm_) {
      AppendPass("sync_batch_norm_pass");
    }

    // Fuse relu and depthwise_conv2d ops.
    if (strategy.fuse_relu_depthwise_conv_) {
      VLOG(10) << "Add fuse_relu_depthwise_conv_pass";
      AppendPass("fuse_relu_depthwise_conv_pass");
    }
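    // Note: fuse_relu_depthwise_conv_pass only works on GPU; Apply() below
    // skips it with a warning when use_cuda is false.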

    // NOTE(dzhwinter): Notes on automatic inplace:
    // 1. Passes that modify the program desc should be placed before the
    //    inplace pass.
    // 2. Manually configured inplace ops should be set before the inplace
    //    pass runs.

    // Add automatic inplace.
    if (strategy_.enable_inplace_) {
      VLOG(10) << "Add inplace_pass";
      AppendPass("inplace_pass");
    }

    if (strategy_.fuse_elewise_add_act_ops_) {
      VLOG(10) << "Add fuse_elewise_add_act_pass";
      AppendPass("fuse_elewise_add_act_pass");
    }

    // For single-card training, fuse_all_reduce_ops is unnecessary.
    // alloc_continuous_space_for_grad_pass must run before the
    // multi-devices pass.
    if (strategy_.fuse_all_reduce_ops_) {
      VLOG(10) << "Add alloc_continuous_space_for_grad_pass";
      AppendPass("alloc_continuous_space_for_grad_pass");
    }

    if (strategy_.fuse_all_optimizer_ops_) {
      if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce ||
          strategy_.is_distribution_) {
        VLOG(3)
            << "Currently, fuse_all_optimizer_ops only works under AllReduce "
               "mode.";
        strategy_.fuse_all_optimizer_ops_ = false;
      } else {
        VLOG(10) << "Add alloc_continuous_space_for_grad_pass";
        AppendPass("alloc_continuous_space_for_grad_pass");
        // NOTE: each fuse_xx_op_pass first counts the number of xx operators;
        // if the count is zero, the pass does nothing.
        // Currently, only one type of optimization algorithm can be fused.
        VLOG(10) << "Add fuse_adam_op_pass";
        AppendPass("fuse_adam_op_pass");
        VLOG(10) << "Add fuse_sgd_op_pass";
        AppendPass("fuse_sgd_op_pass");
      }
    }
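    // For example, under kAllReduce with fuse_all_optimizer_ops_ = true, the
    // pipeline gains alloc_continuous_space_for_grad_pass plus the two
    // optimizer-fusion passes; a program without adam ops simply leaves
    // fuse_adam_op_pass as a no-op.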

    // Add a graph viz pass to record the graph after fusion.
    if (!strategy.debug_graphviz_path_.empty()) {
      auto viz_pass = AppendPass("graph_viz_pass");
      const std::string graph_path = string::Sprintf(
          "%s%s", strategy_.debug_graphviz_path_.c_str(), "_fused_graph");
      viz_pass->Set<std::string>("graph_viz_path", new std::string(graph_path));
    }

    CollectiveContext *context = CollectiveContext::GetInstance();
    context->endpoints_ = strategy_.trainers_endpoints_;
    context->trainer_id_ = strategy_.trainer_id_;
    PADDLE_ENFORCE(strategy_.trainer_id_ >= 0, "trainer_id_ >= 0");
    if (strategy_.trainer_id_ > 0 && strategy_.trainers_endpoints_.size() > 0) {
      PADDLE_ENFORCE((unsigned)(strategy_.trainer_id_) <
                         strategy_.trainers_endpoints_.size(),
                     "trainer_id_ < endpoints_ size");
    }
    VLOG(1) << "CollectiveContext:" << context->String();
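    // For example (illustrative values): trainers_endpoints_ =
    // {"127.0.0.1:6170", "127.0.0.1:6171"} with trainer_id_ = 1 satisfies
    // the check above, since 1 < 2.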

    // NOTE(dzh): memory optimization should be a runtime pass.
    // However, after multi_devices_pass, VarHandle and OpHandle become the
    // de facto IR, so any reuse on the Graph is meaningless at that point.
    // As a side effect, the memory optimizer cannot foresee the fetched
    // vars, so the fetch list should be set persistable before calling the
    // Run interface.
    if (strategy_.memory_optimize_) {
      VLOG(10) << "Add memory_optimize_pass";
      AppendPass("memory_optimize_pass");
    }

    AppendMultiDevPass(strategy_);

    if (strategy_.fuse_all_reduce_ops_) {
      // NOTE: fuse_all_reduce_op_pass first counts the number of all_reduce
      // operators; if the count is zero, it does nothing.
      VLOG(10) << "Add fuse_all_reduce_op_pass";
      AppendPass("fuse_all_reduce_op_pass");
    }

    // Add a graph print pass to record a graph with device info.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto multi_devices_print_pass = AppendPass("multi_devices_print_pass");
      const std::string graph_path =
          string::Sprintf("%s%s", strategy_.debug_graphviz_path_.c_str(),
                          "_multi_devices_graph");
      multi_devices_print_pass->Set<std::string>(kGraphvizPath,
                                                 new std::string(graph_path));
      multi_devices_print_pass->Set<details::GraphvizSSAGraphPrinter>(
          "graph_printer", new details::GraphvizSSAGraphPrinter);
    }

    // Experiments show that the program runs faster if all_reduce_deps_pass
    // is appended here.
    if (!strategy_.enable_parallel_graph_ &&
        (SeqOnlyAllReduceOps(strategy_) ||
         strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce)) {
      AppendPass("all_reduce_deps_pass");
    }

    if (strategy_.remove_unnecessary_lock_) {
      VLOG(10) << "Add modify_op_lock_and_record_event_pass";
      AppendPass("modify_op_lock_and_record_event_pass");
    }

    // Verify that the graph is correct for the multi-device executor.
    AppendPass("multi_devices_check_pass");
  }

  // Convert the graph to run on multiple devices.
  void AppendMultiDevPass(const BuildStrategy &strategy) {
    ir::Pass *multi_devices_pass = nullptr;
    if (strategy.is_distribution_) {
      VLOG(10) << "Add dist_multi_devices_pass";
      multi_devices_pass = AppendPass("dist_multi_devices_pass").get();
    } else {
      if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
        VLOG(10) << "Add all_reduce_mode_multi_devices_pass";
        multi_devices_pass =
            AppendPass("all_reduce_mode_multi_devices_pass").get();
      } else if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kReduce) {
        VLOG(10) << "Add reduce_mode_multi_devices_pass";
        multi_devices_pass = AppendPass("reduce_mode_multi_devices_pass").get();
      } else {
        PADDLE_THROW("Unknown reduce strategy.");
      }
    }
    multi_devices_pass->SetNotOwned<const BuildStrategy>("strategy",
                                                         &strategy_);
  }
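  // In summary: is_distribution_ selects dist_multi_devices_pass; otherwise
  // kAllReduce selects all_reduce_mode_multi_devices_pass and kReduce
  // selects reduce_mode_multi_devices_pass.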

 private:
  BuildStrategy strategy_;
};

std::shared_ptr<ir::PassBuilder> BuildStrategy::CreatePassesFromStrategy(
    bool finalize_strategy) const {
  if (is_finalized_) {
    return pass_builder_;
  }
  pass_builder_.reset(new ParallelExecutorPassBuilder(*this));
  if (finalize_strategy) {
    is_finalized_ = true;
  }
  return pass_builder_;
}
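// A minimal usage sketch (the names below are illustrative, not part of
// this file):
//
//   BuildStrategy strategy;
//   strategy.fuse_elewise_add_act_ops_ = true;
//   auto builder =
//       strategy.CreatePassesFromStrategy(/*finalize_strategy=*/true);
//
// Once finalized, subsequent calls return the same pass builder unchanged.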

bool BuildStrategy::IsMultiDevPass(const std::string &pass_name) const {
  return framework::details::MultiDevSSAGraphBuilder().count(pass_name) > 0;
}
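// Assuming MultiDevSSAGraphBuilder() returns the set of multi-devices pass
// names registered above, IsMultiDevPass("dist_multi_devices_pass") is true
// while IsMultiDevPass("graph_viz_pass") is false.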

ir::Graph *BuildStrategy::Apply(ir::Graph *graph,
                                const std::vector<platform::Place> &places,
                                const std::string &loss_var_name,
                                const std::vector<Scope *> &local_scopes,
                                const size_t &nranks,
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
                                const bool use_cuda,
                                platform::NCCLContextMap *nccl_ctxs) const {
#else
                                const bool use_cuda) const {
#endif
  // Create a default pass builder if the user has not finalized the
  // strategy.
  CreatePassesFromStrategy(false);

  for (std::shared_ptr<ir::Pass> &pass : pass_builder_->AllPasses()) {
    if (IsMultiDevPass(pass->Type())) {
      pass->Erase(kPlaces);
      pass->SetNotOwned<const std::vector<platform::Place>>(kPlaces, &places);
      pass->Erase(kLossVarName);
      pass->SetNotOwned<const std::string>(kLossVarName, &loss_var_name);
      pass->Erase(kLocalScopes);
      pass->SetNotOwned<const std::vector<Scope *>>(kLocalScopes,
                                                    &local_scopes);
      pass->Erase(kNRanks);
      pass->Set<size_t>(kNRanks, new size_t(nranks));

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr;
      pass->Erase(kNCCLCtxs);
      pass->SetNotOwned<platform::NCCLContextMap>(kNCCLCtxs, nctx);
#endif
    } else if (pass->Type() == "alloc_continuous_space_for_grad_pass" ||
               pass->Type() == "fuse_adam_op_pass" ||
               pass->Type() == "fuse_sgd_op_pass" ||
               pass->Type() == "fuse_all_reduce_op_pass") {
      pass->Erase(kPlaces);
      pass->SetNotOwned<const std::vector<platform::Place>>(kPlaces, &places);
      pass->Erase(kLocalScopes);
      pass->SetNotOwned<const std::vector<Scope *>>(kLocalScopes,
                                                    &local_scopes);
      if (pass->Type() == "fuse_all_reduce_op_pass") {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
        platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr;
        pass->Erase(kNCCLCtxs);
        pass->SetNotOwned<platform::NCCLContextMap>(kNCCLCtxs, nctx);
#endif
      }
    } else if (pass->Type() == "sequential_execution_pass") {
      LOG(INFO) << "set enable_sequential_execution:"
                << enable_sequential_execution_;
    } else if (pass->Type() == "all_reduce_deps_pass") {
      LOG(INFO) << "SeqOnlyAllReduceOps:" << SeqOnlyAllReduceOps(*this)
                << ", num_trainers:" << num_trainers_;
    } else if (pass->Type() == "fuse_relu_depthwise_conv_pass") {
      if (!use_cuda) {
        LOG(WARNING) << "fuse_relu_depthwise_conv_pass is only supported on "
                        "GPU, skipped.";
        continue;
      }
    }
    VLOG(3) << "Start Apply Pass " << pass->Type();
    graph = pass->Apply(graph);
    VLOG(3) << "Finish Apply Pass " << pass->Type();
  }
  return graph;
}

}  // namespace details
}  // namespace framework
}  // namespace paddle

USE_PASS(sync_batch_norm_pass);
USE_PASS(fuse_relu_depthwise_conv_pass);
USE_PASS(fuse_elewise_add_act_pass);
USE_PASS(graph_viz_pass);
USE_PASS(multi_batch_merge_pass);
USE_PASS(reduce_mode_multi_devices_pass);
USE_PASS(all_reduce_mode_multi_devices_pass);
USE_PASS(dist_multi_devices_pass);
USE_PASS(multi_devices_check_pass);
USE_PASS(multi_devices_print_pass);
USE_PASS(memory_optimize_pass);
USE_PASS(sequential_execution_pass);
USE_PASS(all_reduce_deps_pass);
USE_PASS(modify_op_lock_and_record_event_pass);
USE_PASS(inplace_pass);
USE_PASS(lock_free_optimize_pass);
USE_PASS(alloc_continuous_space_for_grad_pass);
USE_PASS(graph_to_program_pass);
USE_PASS(fuse_adam_op_pass);
USE_PASS(fuse_sgd_op_pass);
USE_PASS(fuse_all_reduce_op_pass);