/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/details/build_strategy.h"

#include <glog/logging.h>
#include <memory>

#include "paddle/fluid/framework/details/graph_print_pass.h"
#include "paddle/fluid/framework/details/memory_optimize_helper.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/sequential_execution_pass.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/graph_to_program_pass.h"
#include "paddle/fluid/framework/ir/graph_viz_pass.h"

namespace paddle {
namespace framework {
namespace details {

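// Returns true when the order of all-reduce ops must be fixed, i.e. when
// multiple trainers (or the parallel graph executor) may schedule them
// concurrently.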
static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) {
  // The order of all-reduce ops must be fixed when they are scheduled in
  // multiple threads or processes, to avoid a hang.
  return (!strategy.enable_sequential_execution_ &&
          strategy.num_trainers_ > 1) ||
         strategy.enable_parallel_graph_;
}

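// Assembles the pass pipeline described by a BuildStrategy; the order in
// which passes are appended here is the order in which they run.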
class ParallelExecutorPassBuilder : public ir::PassBuilder {
 public:
  explicit ParallelExecutorPassBuilder(const BuildStrategy &strategy)
      : ir::PassBuilder(), strategy_(strategy) {
    if (strategy_.enable_sequential_execution_) {
      AppendPass("sequential_execution_pass");
    }

    // Add op fusion.
    if (strategy.fuse_relu_depthwise_conv_) {
      AppendPass("fuse_relu_depthwise_conv_pass");
    }

    // Add automatic in-place optimization.
    if (strategy_.enable_inplace_) {
      AppendPass("inplace_pass");
    }

    // Add a graph viz pass to record a graph.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto viz_pass = AppendPass("graph_viz_pass");
      const std::string graph_path = string::Sprintf(
          "%s%s", strategy_.debug_graphviz_path_.c_str(), "_original_graph");
      viz_pass->Set<std::string>("graph_viz_path", new std::string(graph_path));
    }

    if (strategy.fuse_elewise_add_act_ops_) {
      auto fuse_elewise_add_act_pass = AppendPass("fuse_elewise_add_act_pass");
      // Add a graph viz pass to record a graph.
      if (!strategy.debug_graphviz_path_.empty()) {
        auto viz_pass = AppendPass("graph_viz_pass");
        const std::string graph_path = string::Sprintf(
            "%s%s", strategy.debug_graphviz_path_.c_str(), "_fused_graph");
        viz_pass->Set<std::string>("graph_viz_path",
                                   new std::string(graph_path));
      }
    }

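    // Publish the distributed-training context (trainer endpoints and id)
    // through the CollectiveContext singleton.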
    CollectiveContext *context = CollectiveContext::GetInstance();
    context->endpoints_ = strategy_.trainers_endpoints_;
    context->trainer_id_ = strategy_.trainer_id_;
    PADDLE_ENFORCE(strategy_.trainer_id_ >= 0, "trainer_id_ >= 0");
    if (strategy_.trainer_id_ > 0 && strategy_.trainers_endpoints_.size() > 0) {
      PADDLE_ENFORCE((unsigned)(strategy_.trainer_id_) <
                         strategy_.trainers_endpoints_.size(),
                     "trainer_id_ < endpoints_ size");
    }
    VLOG(1) << "CollectiveContext:" << context->String();

    // NOTE(dzh): Memory optimization should ideally be a runtime pass.
    // However, after multi_devices_pass, VarHandle and OpHandle are the
    // de facto IR, so any reuse applied to the Graph after that point is
    // meaningless. A side effect is that memory optimization cannot
    // foresee the fetched vars, so the fetch list should be set
    // persistable before calling the Run interface.
    if (strategy.memory_optimize_) {
      AppendPass("memory_optimize_pass");
    }

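    // Convert the single-device program into a multi-device graph (see
    // AppendMultiDevPass below).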
    AppendMultiDevPass(strategy);

    // Add a graph print pass to record a graph with device info.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto multi_devices_print_pass = AppendPass("multi_devices_print_pass");
      const std::string graph_path =
          string::Sprintf("%s%s", strategy_.debug_graphviz_path_.c_str(),
                          "_multi_devices_graph");
      multi_devices_print_pass->Set<std::string>(kGraphvizPath,
                                                 new std::string(graph_path));
      multi_devices_print_pass->Set<details::GraphvizSSAGraphPrinter>(
          "graph_printer", new details::GraphvizSSAGraphPrinter);
    }

    // Verify that the graph is correct for multi-device executor.
    AppendPass("multi_devices_check_pass");

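    // Enforce a deterministic all-reduce order across trainers (see
    // SeqOnlyAllReduceOps above).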
    if (SeqOnlyAllReduceOps(strategy)) {
      AppendPass("all_reduce_deps_pass");
    }

    if (strategy_.remove_unnecessary_lock_) {
      AppendPass("modify_op_lock_and_record_event_pass");
    }
  }

  // Convert the graph to run on multiple devices.
  void AppendMultiDevPass(const BuildStrategy &strategy) {
    ir::Pass *multi_devices_pass;
    if (strategy_.is_distribution_) {
      multi_devices_pass = AppendPass("dist_multi_devices_pass").get();
    } else {
      if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
        multi_devices_pass =
            AppendPass("allreduce_mode_multi_devices_pass").get();
      } else if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kReduce) {
        multi_devices_pass = AppendPass("reduce_mode_multi_devices_pass").get();
      } else {
        PADDLE_THROW("Unknown reduce strategy.");
      }
    }
    multi_devices_pass->SetNotOwned<const BuildStrategy>("strategy",
                                                         &strategy_);
  }

 private:
  BuildStrategy strategy_;
};

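// Create the pass pipeline from this strategy, or return the cached one if
// the strategy has already been finalized. Passing finalize_strategy = true
// freezes the builder so that later calls reuse it unchanged.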
std::shared_ptr<ir::PassBuilder> BuildStrategy::CreatePassesFromStrategy(
    bool finalize_strategy) const {
  if (is_finalized_) {
    return pass_builder_;
  }
  pass_builder_.reset(new ParallelExecutorPassBuilder(*this));
  if (finalize_strategy) {
    is_finalized_ = true;
  }
  return pass_builder_;
}

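// A pass is a multi-device pass if its name is registered in the set
// returned by MultiDevSSAGraphBuilder().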
bool BuildStrategy::IsMultiDevPass(const std::string &pass_name) const {
  return framework::details::MultiDevSSAGraphBuilder().count(pass_name) > 0;
}

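// Apply the pass pipeline to main_program: each pass is given the runtime
// attributes it needs (places, loss var name, local scopes, nranks, and,
// under CUDA, the NCCL context map) before it transforms the graph.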
std::unique_ptr<ir::Graph> BuildStrategy::Apply(
    const ProgramDesc &main_program, const std::vector<platform::Place> &places,
    const std::string &loss_var_name, const std::vector<Scope *> &local_scopes,
    const size_t &nranks,
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    const bool use_cuda, platform::NCCLContextMap *nccl_ctxs) const {
#else
    const bool use_cuda) const {
#endif
  // Create a default pass builder if the user has not finalized one.
  CreatePassesFromStrategy(false);

  std::unique_ptr<ir::Graph> graph(new ir::Graph(main_program));
  for (std::shared_ptr<ir::Pass> &pass : pass_builder_->AllPasses()) {
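    // Multi-device passes need placement information that is only known at
    // Apply time, so it is injected here as pass attributes.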
    if (IsMultiDevPass(pass->Type())) {
      pass->Erase(kPlaces);
      pass->SetNotOwned<const std::vector<platform::Place>>(kPlaces, &places);
      pass->Erase(kLossVarName);
      pass->SetNotOwned<const std::string>(kLossVarName, &loss_var_name);
      pass->Erase(kLocalScopes);
      pass->SetNotOwned<const std::vector<Scope *>>(kLocalScopes,
                                                    &local_scopes);
      pass->Erase(kNRanks);
      pass->Set<size_t>(kNRanks, new size_t(nranks));

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr;
      pass->Erase("nccl_ctxs");
      pass->SetNotOwned<platform::NCCLContextMap>("nccl_ctxs", nctx);
#endif
    } else if (pass->Type() == "memory_optimize_pass") {
      if (graph->Has(kAllOpDescs)) {
        graph->Erase(kAllOpDescs);
      }
      const std::vector<OpDesc *> *all_op_descs =
          new std::vector<OpDesc *>(main_program.Block(0).AllOps());
      graph->Set<const std::vector<OpDesc *>>(kAllOpDescs,
                                              all_op_descs);  // take ownership
      graph->Set<GraphNodePool>(kGraphNodePool,
                                new GraphNodePool);  // take ownership

      pass->Erase(kAllOpDescs);
      pass->SetNotOwned<const std::vector<OpDesc *>>(kAllOpDescs, all_op_descs);

    } else if (pass->Type() == "sequential_execution_pass") {
      LOG(INFO) << "set enable_sequential_execution:"
                << enable_sequential_execution_;

      pass->Erase(kAllOpDescs);
      pass->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "all_reduce_deps_pass") {
      LOG(INFO) << "SeqOnlyAllReduceOps:" << SeqOnlyAllReduceOps(*this)
                << ", num_trainers:" << num_trainers_;

      pass->Erase(kAllOpDescs);
      pass->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "inplace_pass") {
      if (graph->Has(kAllOpDescs)) {
        graph->Erase(kAllOpDescs);
      }
      if (!graph->Has(kGraphviz)) {
        graph->Set<GraphvizNodes>(kGraphviz, new GraphvizNodes);
      }
      graph->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "fuse_relu_depthwise_conv_pass") {
      if (!use_cuda) {
        LOG(WARNING) << "fuse_relu_depthwise_conv_pass is only supported on "
                        "GPU, skipped.";
        continue;
      }
    } else if (pass->Type() == "graph_print_pass") {
      if (!graph->Has(kGraphviz)) {
        graph->Set<GraphvizNodes>(kGraphviz, new GraphvizNodes);
      }
    }
    graph = pass->Apply(std::move(graph));
  }
  return graph;
}
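
// A minimal usage sketch, assuming a ProgramDesc `program`, device places,
// local scopes, and (for CUDA builds) an NCCLContextMap `nccl_ctxs` have
// been prepared elsewhere; the variable names are illustrative only:
//
//   BuildStrategy strategy;
//   strategy.memory_optimize_ = true;
//   std::unique_ptr<ir::Graph> graph =
//       strategy.Apply(program, places, "loss", scopes,
//                      /*nranks=*/places.size(), /*use_cuda=*/true,
//                      nccl_ctxs);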

}  // namespace details
}  // namespace framework
}  // namespace paddle

USE_PASS(fuse_relu_depthwise_conv_pass);
USE_PASS(fuse_elewise_add_act_pass);
USE_PASS(graph_viz_pass);
USE_PASS(multi_batch_merge_pass);
USE_PASS(reduce_mode_multi_devices_pass);
USE_PASS(allreduce_mode_multi_devices_pass);
USE_PASS(dist_multi_devices_pass);
USE_PASS(multi_devices_check_pass);
USE_PASS(multi_devices_print_pass);
USE_PASS(memory_optimize_pass);
USE_PASS(sequential_execution_pass);
USE_PASS(all_reduce_deps_pass);
USE_PASS(modify_op_lock_and_record_event_pass);
USE_PASS(inplace_pass);
USE_PASS(lock_free_optimize_pass);
USE_PASS(graph_print_pass);
USE_PASS(graph_to_program_pass);