/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/details/build_strategy.h"

#include <glog/logging.h>
#include <memory>
#include <utility>
#include "paddle/fluid/framework/details/memory_optimize_helper.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/sequential_execution_pass.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/graph_to_program_pass.h"
#include "paddle/fluid/framework/ir/graph_viz_pass.h"

namespace paddle {
namespace framework {
namespace details {

static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) {
  // The all-reduce op order must be fixed when the ops are scheduled
  // across multiple threads or processes, otherwise execution may hang.
  // NOTE: ParallelGraph executes this pass on each graph, so there is
  // no need to append it here.
  return (!strategy.enable_sequential_execution_ &&
          strategy.num_trainers_ > 1) &&
         !strategy.enable_parallel_graph_;
}
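
// Example (hypothetical configuration): with enable_sequential_execution_ =
// false, num_trainers_ = 2 and enable_parallel_graph_ = false, the predicate
// returns true, and all_reduce_deps_pass will later be appended to fix the
// all-reduce order.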

class ParallelExecutorPassBuilder : public ir::PassBuilder {
 public:
  explicit ParallelExecutorPassBuilder(const BuildStrategy &strategy)
      : ir::PassBuilder(), strategy_(strategy) {
    // Add a graph viz pass to record the original graph.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto viz_pass = AppendPass("graph_viz_pass");
      const std::string graph_path = string::Sprintf(
          "%s%s", strategy_.debug_graphviz_path_.c_str(), "_original_graph");
      viz_pass->Set<std::string>("graph_viz_path", new std::string(graph_path));
    }

    // Note(zcd): record_skip_memory_opt_vars_pass should be the first pass.
    AppendPass("record_skip_memory_opt_vars_pass");

    if (strategy_.enable_sequential_execution_) {
      VLOG(10) << "Add sequential_execution_pass";
      AppendPass("sequential_execution_pass");
    }

    // Convert batch_norm to sync_batch_norm so that batch norm statistics
    // are synchronized across devices.
    if (strategy.sync_batch_norm_) {
      AppendPass("sync_batch_norm_pass");
    }

    // Add op fusion.
    if (strategy.fuse_relu_depthwise_conv_) {
      VLOG(10) << "Add fuse_relu_depthwise_conv_pass";
      AppendPass("fuse_relu_depthwise_conv_pass");
    }

    // NOTE(dzhwinter): Notes on automatic inplace:
    // 1. Passes that modify the program desc should be placed before the
    //    inplace pass.
    // 2. Manually configured inplace ops should be set up before
    //    inplace_pass.

    // Add automatic inplace.
    if (strategy_.enable_inplace_) {
      VLOG(10) << "Add inplace_pass";
      AppendPass("inplace_pass");
    }

    if (strategy_.fuse_elewise_add_act_ops_) {
      VLOG(10) << "Add fuse_elewise_add_act_pass";
      AppendPass("fuse_elewise_add_act_pass");
    }

    // For single-card training, fuse_all_reduce_ops is unnecessary.
    // alloc_continuous_space_for_grad_pass should run before MultiDevPass.
    if (strategy_.fuse_all_reduce_ops_) {
      VLOG(10) << "Add alloc_continuous_space_for_grad_pass";
      AppendPass("alloc_continuous_space_for_grad_pass");
    }

    if (strategy_.fuse_all_optimizer_ops_) {
      if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce ||
          strategy_.is_distribution_) {
        VLOG(3)
            << "Currently, fuse_all_optimizer_ops only works under AllReduce "
               "mode.";
        strategy_.fuse_all_optimizer_ops_ = false;
      } else {
        // NOTE: fuse_all_xx_ops will first count the number of xx operators;
        // if the count is zero, the pass does nothing.
        // Currently, only one type of optimization algorithm can be fused.
        VLOG(10) << "Add fuse_adam_op_pass";
        AppendPass("fuse_adam_op_pass");
        VLOG(10) << "Add fuse_sgd_op_pass";
        AppendPass("fuse_sgd_op_pass");
      }
    }

    // Add a graph viz pass to record the fused graph.
    if (!strategy.debug_graphviz_path_.empty()) {
      auto viz_pass = AppendPass("graph_viz_pass");
      const std::string graph_path = string::Sprintf(
          "%s%s", strategy_.debug_graphviz_path_.c_str(), "_fused_graph");
      viz_pass->Set<std::string>("graph_viz_path", new std::string(graph_path));
    }

    CollectiveContext *context = CollectiveContext::GetInstance();
    context->endpoints_ = strategy_.trainers_endpoints_;
    context->trainer_id_ = strategy_.trainer_id_;
    PADDLE_ENFORCE(strategy_.trainer_id_ >= 0, "trainer_id_ >= 0");
    if (strategy_.trainer_id_ > 0 && strategy_.trainers_endpoints_.size() > 0) {
      PADDLE_ENFORCE((unsigned)(strategy_.trainer_id_) <
                         strategy_.trainers_endpoints_.size(),
                     "trainer_id_ < endpoints_ size");
    }
    VLOG(1) << "CollectiveContext:" << context->String();

    // NOTE(dzh): memory optimize should be a runtime pass.
    // However, after multi_devices_pass, VarHandle and OpHandle are the
    // de facto IR, and any reuse on the Graph is meaningless.
    // As a side effect, memory optimize cannot foresee the fetched vars,
    // so the fetch list should be set persistable before calling Run.
    if (strategy_.memory_optimize_) {
      VLOG(10) << "Add memory_optimize_pass";
      AppendPass("memory_optimize_pass");
    }

    // The runtime_context_cache pass should be the last pass, so that the
    // attr is enabled on all original and fused operators; no operator can
    // have this attr enabled if the pass is placed after MultiDevPass.
    if (strategy_.cache_runtime_context_) {
      VLOG(10) << "Add runtime_context_cache_pass";
      AppendPass("runtime_context_cache_pass");
    }

    if (strategy_.cache_expected_kernel_) {
      VLOG(10) << "Add expected_kernel_cache_pass";
      AppendPass("expected_kernel_cache_pass");
    }

    AppendMultiDevPass(strategy_);

    if (strategy_.fuse_all_reduce_ops_) {
      // NOTE: fuse_all_reduce_ops will first count the number of all_reduce
      // operators; if the count is zero, the pass does nothing.
      VLOG(10) << "Add fuse_all_reduce_op_pass";
      AppendPass("fuse_all_reduce_op_pass");
    }

    // Add a graph print pass to record a graph with device info.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto multi_devices_print_pass = AppendPass("multi_devices_print_pass");
      const std::string graph_path =
          string::Sprintf("%s%s", strategy_.debug_graphviz_path_.c_str(),
                          "_multi_devices_graph");
      multi_devices_print_pass->Set<std::string>(kGraphvizPath,
                                                 new std::string(graph_path));
      multi_devices_print_pass->Set<details::GraphvizSSAGraphPrinter>(
          "graph_printer", new details::GraphvizSSAGraphPrinter);
    }
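
    // For example, assuming debug_graphviz_path_ = "/tmp/graph", the viz
    // passes above record "/tmp/graph_original_graph",
    // "/tmp/graph_fused_graph" and "/tmp/graph_multi_devices_graph".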

    // Experiments show that the program runs faster if all_reduce_deps_pass
    // is appended here.
    if (!strategy_.enable_parallel_graph_ &&
        (SeqOnlyAllReduceOps(strategy_) ||
         strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce)) {
      VLOG(10) << "Add all_reduce_deps_pass";
      AppendPass("all_reduce_deps_pass");
    }

    if (strategy_.remove_unnecessary_lock_) {
      VLOG(10) << "Add modify_op_lock_and_record_event_pass";
      AppendPass("modify_op_lock_and_record_event_pass");
    }

    // Verify that the graph is correct for multi-device executor.
    AppendPass("multi_devices_check_pass");
  }

  // Convert the graph to run on multiple devices.
  void AppendMultiDevPass(const BuildStrategy &strategy) {
    ir::Pass *multi_devices_pass = nullptr;

    if (strategy_.async_mode_) {
      multi_devices_pass = AppendPass("async_multi_devices_pass").get();
    } else if (strategy_.is_distribution_) {
      VLOG(10)
          << "Add dist_multi_devices_pass, multi device parameter server mode";
      multi_devices_pass = AppendPass("dist_multi_devices_pass").get();
    } else {
      if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
        VLOG(10) << "Add all_reduce_mode_multi_devices_pass";
        multi_devices_pass =
            AppendPass("all_reduce_mode_multi_devices_pass").get();
      } else if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kReduce) {
        VLOG(10) << "Add reduce_mode_multi_devices_pass";
        multi_devices_pass = AppendPass("reduce_mode_multi_devices_pass").get();
      } else {
        PADDLE_THROW("Unknown reduce strategy.");
      }
    }
    multi_devices_pass->SetNotOwned<const BuildStrategy>("strategy",
                                                         &strategy_);
  }
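
  // Summary of the strategy-to-pass mapping above:
  //   async_mode_                 -> async_multi_devices_pass
  //   is_distribution_            -> dist_multi_devices_pass
  //   ReduceStrategy::kAllReduce  -> all_reduce_mode_multi_devices_pass
  //   ReduceStrategy::kReduce     -> reduce_mode_multi_devices_pass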

 private:
  BuildStrategy strategy_;
};

std::shared_ptr<ir::PassBuilder> BuildStrategy::CreatePassesFromStrategy(
    bool finalize_strategy) const {
  if (is_finalized_) {
    return pass_builder_;
  }
  pass_builder_.reset(new ParallelExecutorPassBuilder(*this));
  if (finalize_strategy) {
    is_finalized_ = true;
  }
  return pass_builder_;
}
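
// A minimal usage sketch (hypothetical caller code, not part of this file):
// obtain the builder with finalize_strategy = true so that later calls do
// not rebuild the pass list, then customize it.
//
//   BuildStrategy strategy;
//   strategy.fuse_elewise_add_act_ops_ = true;
//   auto builder = strategy.CreatePassesFromStrategy(true);
//   builder->AppendPass("graph_viz_pass");  // further customize the pipeline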

bool BuildStrategy::IsMultiDevPass(const std::string &pass_name) const {
  return framework::details::MultiDevSSAGraphBuilder().count(pass_name) > 0;
}
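
// For instance, names such as "reduce_mode_multi_devices_pass" and
// "all_reduce_mode_multi_devices_pass" are expected to be in the
// MultiDevSSAGraphBuilder set, while "graph_viz_pass" is not.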

ir::Graph *BuildStrategy::Apply(ir::Graph *graph,
                                const std::vector<platform::Place> &places,
                                const std::string &loss_var_name,
                                const std::vector<Scope *> &local_scopes,
                                const size_t &nranks,
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
                                const bool use_cuda,
                                platform::NCCLContextMap *nccl_ctxs) const {
#else
                                const bool use_cuda) const {
#endif
  VLOG(3) << "apply all passes";
  // Create a default one if not finalized by user.
  CreatePassesFromStrategy(false);

  for (std::shared_ptr<ir::Pass> &pass : pass_builder_->AllPasses()) {
    VLOG(3) << "BuildStrategy::Apply pass:" << pass->Type();
    if (IsMultiDevPass(pass->Type())) {
      pass->Erase(kPlaces);
      pass->SetNotOwned<const std::vector<platform::Place>>(kPlaces, &places);
      pass->Erase(kLossVarName);
      pass->SetNotOwned<const std::string>(kLossVarName, &loss_var_name);
      pass->Erase(kLocalScopes);
      pass->SetNotOwned<const std::vector<Scope *>>(kLocalScopes,
                                                    &local_scopes);
      pass->Erase(kNRanks);
      pass->Set<size_t>(kNRanks, new size_t(nranks));

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr;
      pass->Erase(kNCCLCtxs);
      pass->SetNotOwned<platform::NCCLContextMap>(kNCCLCtxs, nctx);
#endif
    } else if (pass->Type() == "alloc_continuous_space_for_grad_pass" ||
               pass->Type() == "fuse_adam_op_pass" ||
               pass->Type() == "fuse_sgd_op_pass" ||
               pass->Type() == "fuse_all_reduce_op_pass") {
      pass->Erase(kPlaces);
      pass->SetNotOwned<const std::vector<platform::Place>>(kPlaces, &places);
      pass->Erase(kLocalScopes);
      pass->SetNotOwned<const std::vector<Scope *>>(kLocalScopes,
                                                    &local_scopes);
      if (pass->Type() == "fuse_all_reduce_op_pass") {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
        platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr;
        pass->Erase(kNCCLCtxs);
        pass->SetNotOwned<platform::NCCLContextMap>(kNCCLCtxs, nctx);
#endif
      }
    } else if (pass->Type() == "sequential_execution_pass") {
      LOG(INFO) << "set enable_sequential_execution:"
                << enable_sequential_execution_;
    } else if (pass->Type() == "all_reduce_deps_pass") {
      LOG(INFO) << "SeqOnlyAllReduceOps:" << SeqOnlyAllReduceOps(*this)
                << ", num_trainers:" << num_trainers_;
    } else if (pass->Type() == "fuse_relu_depthwise_conv_pass") {
      if (!use_cuda) {
        LOG(WARNING) << "fuse_relu_depthwise_conv_pass is only supported on "
                        "GPU, skipped.";
        continue;
      }
    }
    VLOG(3) << "Start Apply Pass " << pass->Type();
    graph = pass->Apply(graph);
    VLOG(3) << "Finish Apply Pass " << pass->Type();
  }
  VLOG(3) << "All Passes Applied";
  return graph;
}
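
// A minimal calling sketch (hypothetical; ParallelExecutor is the usual
// caller). In a CUDA build:
//
//   ir::Graph *graph = ...;  // graph built from the ProgramDesc
//   graph = strategy.Apply(graph, places, loss_var_name, local_scopes,
//                          nranks, use_cuda, nccl_ctxs);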

}  // namespace details
}  // namespace framework
}  // namespace paddle

USE_PASS(sync_batch_norm_pass);
USE_PASS(fuse_relu_depthwise_conv_pass);
USE_PASS(fuse_elewise_add_act_pass);
USE_PASS(graph_viz_pass);
USE_PASS(multi_batch_merge_pass);
USE_PASS(reduce_mode_multi_devices_pass);
USE_PASS(all_reduce_mode_multi_devices_pass);
USE_PASS(dist_multi_devices_pass);
USE_PASS(multi_devices_check_pass);
USE_PASS(multi_devices_print_pass);
USE_PASS(memory_optimize_pass);
USE_PASS(sequential_execution_pass);
USE_PASS(all_reduce_deps_pass);
USE_PASS(modify_op_lock_and_record_event_pass);
USE_PASS(inplace_pass);
USE_PASS(lock_free_optimize_pass);
USE_PASS(alloc_continuous_space_for_grad_pass);
USE_PASS(graph_to_program_pass);
USE_PASS(fuse_adam_op_pass);
USE_PASS(fuse_sgd_op_pass);
USE_PASS(fuse_all_reduce_op_pass);
USE_PASS(runtime_context_cache_pass);
USE_PASS(expected_kernel_cache_pass);
USE_PASS(record_skip_memory_opt_vars_pass);