/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/details/build_strategy.h"

#include <glog/logging.h>
#include <memory>

#include "paddle/fluid/framework/details/graph_print_pass.h"
#include "paddle/fluid/framework/details/memory_optimize_helper.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/sequential_execution_pass.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/graph_viz_pass.h"

namespace paddle {
namespace framework {
namespace details {

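// Returns true when the all-reduce op order must be fixed (see the note
// below); used to decide whether all_reduce_deps_pass is appended.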
static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) {
  // The all-reduce op order must be fixed when the ops are scheduled across
  // multiple threads or processes; otherwise training may hang.
  return (!strategy.enable_sequential_execution_ &&
          strategy.num_trainers_ > 1) ||
         strategy.enable_parallel_graph_;
}

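// Assembles the pass pipeline that ParallelExecutor runs, appending passes
// in the order required by the given BuildStrategy.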
class ParallelExecutorPassBuilder : public ir::PassBuilder {
 public:
  explicit ParallelExecutorPassBuilder(const BuildStrategy &strategy)
      : ir::PassBuilder(), strategy_(strategy) {
    if (strategy_.enable_inplace_) {
      // Debug option (disabled): dump the graph before the inplace pass.
      // if (!strategy_.debug_graphviz_path_.empty()) {
      //   const std::string path =
      //       strategy_.debug_graphviz_path_ + "before_inplaced";
      //   auto pass = AppendPass("graph_print_pass");
      //   pass->Set<std::string>(kGraphvizPath, new std::string(path));
      // }

      AppendPass("inplace_pass");
      // Debug option (disabled): dump the graph after the inplace pass.
      // if (!strategy_.debug_graphviz_path_.empty()) {
      //   const std::string path =
      //       strategy_.debug_graphviz_path_ + "after_inplaced";
      //   auto pass = AppendPass("graph_print_pass");
      //   pass->Set<std::string>(details::kGraphvizPath,
      //                          new std::string(path));
      // }
    }

    if (strategy_.enable_sequential_execution_) {
      AppendPass("sequential_execution_pass");
    }

    // Add a graph viz pass to record a graph.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto viz_pass = AppendPass("graph_viz_pass");
      const std::string graph_path = string::Sprintf(
          "%s%s", strategy_.debug_graphviz_path_.c_str(), "_original_graph");
      viz_pass->Set<std::string>("graph_viz_path", new std::string(graph_path));
    }

    // Add op fusion.
    if (strategy.fuse_relu_depthwise_conv_) {
      AppendPass("fuse_relu_depthwise_conv_pass");
    }
    if (strategy.fuse_elewise_add_act_ops_) {
      auto fuse_elewise_add_act_pass = AppendPass("fuse_elewise_add_act_pass");
      // Add a graph viz pass to record a graph.
      if (!strategy.debug_graphviz_path_.empty()) {
        auto viz_pass = AppendPass("graph_viz_pass");
        const std::string graph_path = string::Sprintf(
            "%s%s", strategy.debug_graphviz_path_.c_str(), "_fused_graph");
        viz_pass->Set<std::string>("graph_viz_path",
                                   new std::string(graph_path));
      }
    }

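    // Propagate the distributed-training settings into the process-wide
    // CollectiveContext.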
    CollectiveContext *context = CollectiveContext::GetInstance();
    context->endpoints_ = strategy_.trainers_endpoints_;
    context->trainer_id_ = strategy_.trainer_id_;
    PADDLE_ENFORCE(strategy_.trainer_id_ >= 0,
                   "trainer_id_ must be non-negative.");
    if (strategy_.trainer_id_ > 0 && !strategy_.trainers_endpoints_.empty()) {
      PADDLE_ENFORCE((unsigned)(strategy_.trainer_id_) <
                         strategy_.trainers_endpoints_.size(),
                     "trainer_id_ must be less than the number of endpoints.");
    }
    VLOG(1) << "CollectiveContext:" << context->String();

    // NOTE(dzh): memory optimization should ideally be a runtime pass.
    // However, after multi_devices_pass, VarHandle and OpHandle become the
    // de facto IR, so any reuse applied to the Graph afterwards is
    // meaningless. As a side effect, the memory-optimize pass cannot foresee
    // the fetched vars, so variables in the fetch list must be set
    // persistable before calling the Run interface.
    if (strategy.memory_optimize_) {
      AppendPass("memory_optimize_pass");
    }

    AppendMultiDevPass(strategy);

    // Add a graph print pass to record a graph with device info.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto multi_devices_print_pass = AppendPass("multi_devices_print_pass");
      const std::string graph_path =
          string::Sprintf("%s%s", strategy_.debug_graphviz_path_.c_str(),
                          "_multi_devices_graph");
      multi_devices_print_pass->Set<std::string>(kGraphvizPath,
                                                 new std::string(graph_path));
      multi_devices_print_pass->Set<details::GraphvizSSAGraphPrinter>(
          "graph_printer", new details::GraphvizSSAGraphPrinter);
    }

    // Verify that the graph is correct for multi-device executor.
    AppendPass("multi_devices_check_pass");

    if (SeqOnlyAllReduceOps(strategy)) {
      AppendPass("all_reduce_deps_pass");
    }

    if (strategy_.remove_unnecessary_lock_) {
      AppendPass("modify_op_lock_and_record_event_pass");
    }
  }

  // Append the pass that converts the graph to run on multiple devices.
  void AppendMultiDevPass(const BuildStrategy &strategy) {
    ir::Pass *multi_devices_pass;
    if (strategy_.is_distribution_) {
      multi_devices_pass = AppendPass("dist_multi_devices_pass").get();
    } else {
      if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
        multi_devices_pass =
            AppendPass("allreduce_mode_multi_devices_pass").get();
      } else if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kReduce) {
        multi_devices_pass = AppendPass("reduce_mode_multi_devices_pass").get();
      } else {
        PADDLE_THROW("Unknown reduce strategy.");
      }
    }
    multi_devices_pass->SetNotOwned<const BuildStrategy>("strategy",
                                                         &strategy_);
  }

 private:
  BuildStrategy strategy_;
};

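// Lazily builds the pass pipeline described by this strategy. Once
// finalized, later calls return the cached builder unchanged; until then
// each call rebuilds it. A minimal usage sketch (illustrative):
//
//   auto builder = strategy.CreatePassesFromStrategy(true);  // finalize once
//   builder->AppendPass("graph_viz_pass");  // optional extra pass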
std::shared_ptr<ir::PassBuilder> BuildStrategy::CreatePassesFromStrategy(
    bool finalize_strategy) const {
  if (is_finalized_) {
    return pass_builder_;
  }
  pass_builder_.reset(new ParallelExecutorPassBuilder(*this));
  if (finalize_strategy) {
    is_finalized_ = true;
  }
  return pass_builder_;
}

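// Returns true if pass_name is one of the multi-devices graph-building
// passes.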
bool BuildStrategy::IsMultiDevPass(const std::string &pass_name) const {
  return framework::details::MultiDevSSAGraphBuilder().count(pass_name) > 0;
}

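// Applies every pass in the builder, in order, to a fresh graph built from
// main_program, wiring in the per-pass inputs first. A minimal call sketch
// (illustrative names; assumes CUDA and an initialized NCCLContextMap):
//
//   std::vector<platform::Place> places = {platform::CUDAPlace(0)};
//   auto graph = strategy.Apply(program, places, "loss", {&scope},
//                               1 /*nranks*/, true /*use_cuda*/, &nccl_ctxs);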
std::unique_ptr<ir::Graph> BuildStrategy::Apply(
    const ProgramDesc &main_program, const std::vector<platform::Place> &places,
    const std::string &loss_var_name, const std::vector<Scope *> &local_scopes,
    const size_t &nranks,
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    const bool use_cuda, platform::NCCLContextMap *nccl_ctxs) const {
#else
    const bool use_cuda) const {
#endif
  // Create a default pass builder if the user has not finalized one.
  CreatePassesFromStrategy(false);

  std::unique_ptr<ir::Graph> graph(new ir::Graph(main_program));
  for (std::shared_ptr<ir::Pass> &pass : pass_builder_->AllPasses()) {
    if (IsMultiDevPass(pass->Type())) {
      pass->Erase(kPlaces);
      pass->SetNotOwned<const std::vector<platform::Place>>(kPlaces, &places);
      pass->Erase(kLossVarName);
      pass->SetNotOwned<const std::string>(kLossVarName, &loss_var_name);
      pass->Erase(kLocalScopes);
      pass->SetNotOwned<const std::vector<Scope *>>(kLocalScopes,
                                                    &local_scopes);
      pass->Erase(kNRanks);
      pass->Set<size_t>(kNRanks, new size_t(nranks));

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr;
      pass->Erase("nccl_ctxs");
      pass->SetNotOwned<platform::NCCLContextMap>("nccl_ctxs", nctx);
#endif
    } else if (pass->Type() == "memory_optimize_pass") {
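      // This pass needs the op list of the origin program and a shared node
      // pool; the graph takes ownership of both.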
      if (graph->Has(kAllOpDescs)) {
        graph->Erase(kAllOpDescs);
      }
      const std::vector<OpDesc *> *all_op_descs =
          new std::vector<OpDesc *>(main_program.Block(0).AllOps());
      graph->Set<const std::vector<OpDesc *>>(kAllOpDescs,
                                              all_op_descs);  // take ownership
      graph->Set<GraphNodePool>(kGraphNodePool,
                                new GraphNodePool);  // take ownership

      pass->Erase(kAllOpDescs);
      pass->SetNotOwned<const std::vector<OpDesc *>>(kAllOpDescs, all_op_descs);

    } else if (pass->Type() == "sequential_execution_pass") {
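      // Sequential execution derives a fixed op order from the origin
      // program.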
      LOG(INFO) << "set enable_sequential_execution:"
                << enable_sequential_execution_;

      pass->Erase(kAllOpDescs);
      pass->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "all_reduce_deps_pass") {
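      // Fixing the all-reduce order likewise requires the origin op order.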
      LOG(INFO) << "SeqOnlyAllReduceOps:" << SeqOnlyAllReduceOps(*this)
                << ", num_trainers:" << num_trainers_;

      pass->Erase(kAllOpDescs);
      pass->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "inplace_pass") {
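      // Refresh the op list for the inplace pass; kGraphviz is set on demand.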
      if (graph->Has(kAllOpDescs)) {
        graph->Erase(kAllOpDescs);
      }
      if (!graph->Has(kGraphviz)) {
        graph->Set<GraphvizNodes>(kGraphviz, new GraphvizNodes);
      }
      graph->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "fuse_relu_depthwise_conv_pass") {
      if (!use_cuda) {
        LOG(WARNING) << "fuse_relu_depthwise_conv_pass is only supported on "
                        "GPU, skipped.";
        continue;
      }
    } else if (pass->Type() == "graph_print_pass") {
      if (!graph->Has(kGraphviz)) {
        graph->Set<GraphvizNodes>(kGraphviz, new GraphvizNodes);
      }
    }
    graph = pass->Apply(std::move(graph));
  }
  return graph;
}

}  // namespace details
}  // namespace framework
}  // namespace paddle

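// Make sure these pass registrations are linked into the binary.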
USE_PASS(fuse_relu_depthwise_conv_pass);
USE_PASS(fuse_elewise_add_act_pass);
USE_PASS(graph_viz_pass);
USE_PASS(multi_batch_merge_pass);
USE_PASS(reduce_mode_multi_devices_pass);
USE_PASS(allreduce_mode_multi_devices_pass);
USE_PASS(dist_multi_devices_pass);
USE_PASS(multi_devices_check_pass);
USE_PASS(multi_devices_print_pass);
USE_PASS(memory_optimize_pass);
USE_PASS(sequential_execution_pass);
USE_PASS(all_reduce_deps_pass);
USE_PASS(modify_op_lock_and_record_event_pass);
USE_PASS(inplace_pass);
USE_PASS(lock_free_optimize_pass);
USE_PASS(graph_print_pass);