/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/details/build_strategy.h"

#include <glog/logging.h>
#include <memory>

#include "paddle/fluid/framework/details/memory_optimize_helper.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/sequential_execution_pass.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/graph_to_program_pass.h"
#include "paddle/fluid/framework/ir/graph_viz_pass.h"

namespace paddle {
namespace framework {
namespace details {

static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) {
  // The allreduce op order must be fixed when the ops are scheduled
  // across multiple threads or processes; otherwise training can hang.
  return (!strategy.enable_sequential_execution_ &&
          strategy.num_trainers_ > 1) &&
         !strategy.enable_parallel_graph_;
}

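// Builds the ordered pass pipeline that ParallelExecutor applies to the
// graph, based on the options enabled in the BuildStrategy. Pass order
// matters; see the notes below on inplace and memory optimization.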
class ParallelExecutorPassBuilder : public ir::PassBuilder {
 public:
  explicit ParallelExecutorPassBuilder(const BuildStrategy &strategy)
      : ir::PassBuilder(), strategy_(strategy) {
    if (strategy_.enable_sequential_execution_) {
      AppendPass("sequential_execution_pass");
    }

    // Add op fusion.
    if (strategy.fuse_relu_depthwise_conv_) {
      AppendPass("fuse_relu_depthwise_conv_pass");
    }

    // NOTE(dzhwinter): Notes on automatic inplace:
    // 1. Passes that modify the program desc should be placed
    //    before the inplace pass.
    // 2. Manually configured inplace should also be handled
    //    before the inplace pass.

    // Add automatic inplace.
    if (strategy_.enable_inplace_) {
      AppendPass("inplace_pass");
    }

    // Add a graph viz pass to record the original graph.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto viz_pass = AppendPass("graph_viz_pass");
      const std::string graph_path = string::Sprintf(
          "%s%s", strategy_.debug_graphviz_path_.c_str(), "_original_graph");
      viz_pass->Set<std::string>("graph_viz_path", new std::string(graph_path));
    }

    if (strategy.fuse_elewise_add_act_ops_) {
      AppendPass("fuse_elewise_add_act_pass");
      // Add a graph viz pass to record the fused graph.
      if (!strategy.debug_graphviz_path_.empty()) {
        auto viz_pass = AppendPass("graph_viz_pass");
        const std::string graph_path = string::Sprintf(
            "%s%s", strategy.debug_graphviz_path_.c_str(), "_fused_graph");
        viz_pass->Set<std::string>("graph_viz_path",
                                   new std::string(graph_path));
      }
    }

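    // Publish the trainer endpoints and trainer id to the process-wide
    // CollectiveContext so that distributed collective ops can locate
    // their peers.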
    CollectiveContext *context = CollectiveContext::GetInstance();
    context->endpoints_ = strategy_.trainers_endpoints_;
    context->trainer_id_ = strategy_.trainer_id_;
    PADDLE_ENFORCE(strategy_.trainer_id_ >= 0, "trainer_id_ >= 0");
    if (strategy_.trainer_id_ > 0 && strategy_.trainers_endpoints_.size() > 0) {
      PADDLE_ENFORCE((unsigned)(strategy_.trainer_id_) <
                         strategy_.trainers_endpoints_.size(),
                     "trainer_id_ < endpoints_ size");
    }
    VLOG(1) << "CollectiveContext:" << context->String();

    // NOTE(dzh): memory optimization should be a runtime pass.
    // However, after multi_devices_pass, VarHandle and OpHandle become
    // the de facto IR, so any reuse applied to the Graph afterwards is
    // meaningless. As a side effect, memory optimization cannot foresee
    // the fetched vars, so the fetch list should be set persistable
    // before calling the Run interface.
    if (strategy.memory_optimize_) {
      AppendPass("memory_optimize_pass");
    }

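    // Convert the single-device graph into a multi-device SSA graph
    // (allreduce, reduce, or distributed mode; see AppendMultiDevPass).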
    AppendMultiDevPass(strategy);

    // Add a graph print pass to record a graph with device info.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto multi_devices_print_pass = AppendPass("multi_devices_print_pass");
      const std::string graph_path =
          string::Sprintf("%s%s", strategy_.debug_graphviz_path_.c_str(),
                          "_multi_devices_graph");
      multi_devices_print_pass->Set<std::string>(kGraphvizPath,
                                                 new std::string(graph_path));
      multi_devices_print_pass->Set<details::GraphvizSSAGraphPrinter>(
          "graph_printer", new details::GraphvizSSAGraphPrinter);
    }

    // Verify that the graph is correct for the multi-device executor.
    auto multi_devices_pass = AppendPass("multi_devices_check_pass");
    multi_devices_pass->Set<bool>(kEnablePG,
                                  new bool(strategy.enable_parallel_graph_));

    if (SeqOnlyAllReduceOps(strategy)) {
      AppendPass("all_reduce_deps_pass");
    }

    if (strategy_.remove_unnecessary_lock_) {
      AppendPass("modify_op_lock_and_record_event_pass");
    }
  }

  // Convert the graph to run on multiple devices.
  void AppendMultiDevPass(const BuildStrategy &strategy) {
    ir::Pass *multi_devices_pass;
    if (strategy_.is_distribution_) {
      multi_devices_pass = AppendPass("dist_multi_devices_pass").get();
    } else {
      if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
        multi_devices_pass =
            AppendPass("allreduce_mode_multi_devices_pass").get();
      } else if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kReduce) {
        multi_devices_pass = AppendPass("reduce_mode_multi_devices_pass").get();
      } else {
        PADDLE_THROW("Unknown reduce strategy.");
      }
    }
    multi_devices_pass->SetNotOwned<const BuildStrategy>("strategy",
                                                         &strategy_);
  }

 private:
  BuildStrategy strategy_;
};

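// Lazily constructs the pass pipeline from this strategy. Once the
// strategy is finalized, the cached builder is returned unchanged on
// subsequent calls.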
std::shared_ptr<ir::PassBuilder> BuildStrategy::CreatePassesFromStrategy(
    bool finalize_strategy) const {
  if (is_finalized_) {
    return pass_builder_;
  }
  pass_builder_.reset(new ParallelExecutorPassBuilder(*this));
  if (finalize_strategy) {
    is_finalized_ = true;
  }
  return pass_builder_;
}

bool BuildStrategy::IsMultiDevPass(const std::string &pass_name) const {
  return framework::details::MultiDevSSAGraphBuilder().count(pass_name) > 0;
}

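// A minimal sketch of how Apply is typically driven (illustrative only;
// `main_program`, `scope`, and `nccl_ctxs` are assumed to be set up by the
// caller, as ParallelExecutor normally does; non-CUDA builds omit the
// trailing nccl_ctxs argument):
//
//   BuildStrategy strategy;
//   strategy.reduce_ = BuildStrategy::ReduceStrategy::kAllReduce;
//   std::vector<platform::Place> places = {platform::CUDAPlace(0)};
//   std::unique_ptr<ir::Graph> graph =
//       strategy.Apply(main_program, places, "loss", {scope},
//                      1 /*nranks*/, true /*use_cuda*/, nccl_ctxs);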
std::unique_ptr<ir::Graph> BuildStrategy::Apply(
    const ProgramDesc &main_program, const std::vector<platform::Place> &places,
    const std::string &loss_var_name, const std::vector<Scope *> &local_scopes,
    const size_t &nranks,
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    const bool use_cuda, platform::NCCLContextMap *nccl_ctxs) const {
#else
    const bool use_cuda) const {
#endif
  // Create a default pass builder if the user has not finalized one.
  CreatePassesFromStrategy(false);

  std::unique_ptr<ir::Graph> graph(new ir::Graph(main_program));
  for (std::shared_ptr<ir::Pass> &pass : pass_builder_->AllPasses()) {
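    // Each pass reads its inputs from graph/pass attributes: erase any
    // stale attribute first, then re-attach the caller-provided objects
    // (SetNotOwned leaves ownership with the caller).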
    if (IsMultiDevPass(pass->Type())) {
      pass->Erase(kPlaces);
      pass->SetNotOwned<const std::vector<platform::Place>>(kPlaces, &places);
      pass->Erase(kLossVarName);
      pass->SetNotOwned<const std::string>(kLossVarName, &loss_var_name);
      pass->Erase(kLocalScopes);
      pass->SetNotOwned<const std::vector<Scope *>>(kLocalScopes,
                                                    &local_scopes);
      pass->Erase(kNRanks);
      pass->Set<size_t>(kNRanks, new size_t(nranks));
      pass->Erase(kEnablePG);
      pass->Set<bool>(kEnablePG, new bool(true));

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr;
      pass->Erase("nccl_ctxs");
      pass->SetNotOwned<platform::NCCLContextMap>("nccl_ctxs", nctx);
#endif
    } else if (pass->Type() == "memory_optimize_pass") {
      if (graph->Has(kAllOpDescs)) {
        graph->Erase(kAllOpDescs);
      }
      const std::vector<OpDesc *> *all_op_descs =
          new std::vector<OpDesc *>(main_program.Block(0).AllOps());
      graph->Set<const std::vector<OpDesc *>>(kAllOpDescs,
                                              all_op_descs);  // take ownership

      pass->Erase(kAllOpDescs);
      pass->SetNotOwned<const std::vector<OpDesc *>>(kAllOpDescs, all_op_descs);

    } else if (pass->Type() == "sequential_execution_pass") {
      LOG(INFO) << "set enable_sequential_execution:"
                << enable_sequential_execution_;

      pass->Erase(kAllOpDescs);
      pass->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "all_reduce_deps_pass") {
      LOG(INFO) << "SeqOnlyAllReduceOps:" << SeqOnlyAllReduceOps(*this)
                << ", num_trainers:" << num_trainers_;

      pass->Erase(kAllOpDescs);
      pass->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "inplace_pass") {
      if (graph->Has(kAllOpDescs)) {
        graph->Erase(kAllOpDescs);
      }
      graph->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "fuse_relu_depthwise_conv_pass") {
      if (!use_cuda) {
        LOG(WARNING) << "fuse_relu_depthwise_conv_pass is only supported on "
                        "GPU, skipped.";
        continue;
      }
    }
    graph = pass->Apply(std::move(graph));
  }
  return graph;
}

}  // namespace details
}  // namespace framework
}  // namespace paddle

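// Reference the pass registrations used above so that their static
// registry entries are linked into the final binary.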
USE_PASS(fuse_relu_depthwise_conv_pass);
USE_PASS(fuse_elewise_add_act_pass);
USE_PASS(graph_viz_pass);
USE_PASS(multi_batch_merge_pass);
USE_PASS(reduce_mode_multi_devices_pass);
USE_PASS(allreduce_mode_multi_devices_pass);
USE_PASS(dist_multi_devices_pass);
USE_PASS(multi_devices_check_pass);
USE_PASS(multi_devices_print_pass);
USE_PASS(memory_optimize_pass);
USE_PASS(sequential_execution_pass);
USE_PASS(all_reduce_deps_pass);
USE_PASS(modify_op_lock_and_record_event_pass);
USE_PASS(inplace_pass);
USE_PASS(lock_free_optimize_pass);
USE_PASS(graph_to_program_pass);