/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/details/build_strategy.h"

#include <glog/logging.h>
#include <memory>

#include "paddle/fluid/framework/details/memory_optimize_helper.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/sequential_execution_pass.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/graph_to_program_pass.h"
#include "paddle/fluid/framework/ir/graph_viz_pass.h"

namespace paddle {
namespace framework {
namespace details {

static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) {
  // The allreduce op order must be fixed when the ops are scheduled across
  // multiple threads or processes; otherwise execution may hang.
  return (!strategy.enable_sequential_execution_ &&
          strategy.num_trainers_ > 1) ||
         strategy.enable_parallel_graph_;
}

class ParallelExecutorPassBuilder : public ir::PassBuilder {
 public:
  explicit ParallelExecutorPassBuilder(const BuildStrategy &strategy)
      : ir::PassBuilder(), strategy_(strategy) {
    if (strategy_.enable_sequential_execution_) {
      AppendPass("sequential_execution_pass");
    }

    // Add op fusion.
    if (strategy.fuse_relu_depthwise_conv_) {
      AppendPass("fuse_relu_depthwise_conv_pass");
    }

    // NOTE(dzhwinter): Notes on automatic inplace:
    // 1. Passes that modify the program desc should run
    //    before the inplace pass.
    // 2. Manually configured inplace ops should be set up
    //    before the inplace pass.
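    // Illustrative ordering implied by the two rules above (a sketch, not a
    // guarantee enforced by this builder):
    //   program-desc-modifying passes -> manual inplace config -> inplace_pass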

    // Add automatically inplace.
    if (strategy_.enable_inplace_) {
      AppendPass("inplace_pass");
    }

    // Add a graph viz pass to record a graph.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto viz_pass = AppendPass("graph_viz_pass");
      const std::string graph_path = string::Sprintf(
          "%s%s", strategy_.debug_graphviz_path_.c_str(), "_original_graph");
      viz_pass->Set<std::string>("graph_viz_path", new std::string(graph_path));
    }

    if (strategy.fuse_elewise_add_act_ops_) {
      AppendPass("fuse_elewise_add_act_pass");
      // Add a graph viz pass to record a graph.
      if (!strategy.debug_graphviz_path_.empty()) {
        auto viz_pass = AppendPass("graph_viz_pass");
        const std::string graph_path = string::Sprintf(
            "%s%s", strategy.debug_graphviz_path_.c_str(), "_fused_graph");
        viz_pass->Set<std::string>("graph_viz_path",
                                   new std::string(graph_path));
      }
    }

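    // Record the trainer topology (endpoints and trainer id) in the
    // process-wide CollectiveContext singleton.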
    CollectiveContext *context = CollectiveContext::GetInstance();
    context->endpoints_ = strategy_.trainers_endpoints_;
    context->trainer_id_ = strategy_.trainer_id_;
    PADDLE_ENFORCE(strategy_.trainer_id_ >= 0, "trainer_id_ >= 0");
    if (strategy_.trainer_id_ > 0 && strategy_.trainers_endpoints_.size() > 0) {
      PADDLE_ENFORCE((unsigned)(strategy_.trainer_id_) <
                         strategy_.trainers_endpoints_.size(),
                     "trainer_id_ < endpoints_ size");
    }
    VLOG(1) << "CollectiveContext:" << context->String();

    // NOTE(dzh): Memory optimization should ideally be a runtime pass.
    // However, after multi_devices_pass, VarHandle and OpHandle become the
    // de facto IR, so any reuse applied to the Graph at that point is
    // meaningless. As a side effect, memory optimization cannot foresee the
    // fetched vars, so the fetch list must be marked persistable before
    // calling the Run interface.
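    // A caller-side sketch (hedged: `fetch_var` is a hypothetical VarDesc*
    // naming a fetch target):
    //   fetch_var->SetPersistable(true);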
    if (strategy.memory_optimize_) {
      AppendPass("memory_optimize_pass");
    }

    AppendMultiDevPass(strategy);

    // Add a graph print pass to record a graph with device info.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto multi_devices_print_pass = AppendPass("multi_devices_print_pass");
      const std::string graph_path =
          string::Sprintf("%s%s", strategy_.debug_graphviz_path_.c_str(),
                          "_multi_devices_graph");
      multi_devices_print_pass->Set<std::string>(kGraphvizPath,
                                                 new std::string(graph_path));
      multi_devices_print_pass->Set<details::GraphvizSSAGraphPrinter>(
          "graph_printer", new details::GraphvizSSAGraphPrinter);
    }

    // Verify that the graph is correct for the multi-device executor.
    AppendPass("multi_devices_check_pass");

    if (SeqOnlyAllReduceOps(strategy)) {
      AppendPass("all_reduce_deps_pass");
    }

    if (strategy_.remove_unnecessary_lock_) {
      AppendPass("modify_op_lock_and_record_event_pass");
    }
  }

  // Append the pass that converts the graph to run on multiple devices.
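  // Strategy-to-pass mapping (as implemented below):
  //   is_distribution_             -> dist_multi_devices_pass
  //   ReduceStrategy::kAllReduce   -> allreduce_mode_multi_devices_pass
  //   ReduceStrategy::kReduce      -> reduce_mode_multi_devices_pass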
  void AppendMultiDevPass(const BuildStrategy &strategy) {
    ir::Pass *multi_devices_pass;
    if (strategy_.is_distribution_) {
      VLOG(3) << "multi device dist train mode";
      multi_devices_pass = AppendPass("dist_multi_devices_pass").get();
    } else {
      if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
        VLOG(3) << "multi device allreduce mode";
        multi_devices_pass =
            AppendPass("allreduce_mode_multi_devices_pass").get();
      } else if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kReduce) {
        VLOG(3) << "multi device reduce mode";
        multi_devices_pass = AppendPass("reduce_mode_multi_devices_pass").get();
      } else {
        PADDLE_THROW("Unknown reduce strategy.");
      }
    }
    multi_devices_pass->SetNotOwned<const BuildStrategy>("strategy",
                                                         &strategy_);
  }

 private:
  BuildStrategy strategy_;
};

std::shared_ptr<ir::PassBuilder> BuildStrategy::CreatePassesFromStrategy(
    bool finalize_strategy) const {
  if (is_finalized_) {
    return pass_builder_;
  }
  pass_builder_.reset(new ParallelExecutorPassBuilder(*this));
  if (finalize_strategy) {
    is_finalized_ = true;
  }
  return pass_builder_;
}
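
// A minimal usage sketch (hedged: the BuildStrategy instance and the flag
// settings are hypothetical caller-side values; only the two calls shown
// are defined by this file):
//
//   BuildStrategy strategy;
//   strategy.fuse_elewise_add_act_ops_ = true;
//   // Build the default pipeline but leave it open for manual edits:
//   auto builder = strategy.CreatePassesFromStrategy(/*finalize_strategy=*/false);
//   builder->AppendPass("graph_viz_pass");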

bool BuildStrategy::IsMultiDevPass(const std::string &pass_name) const {
  return framework::details::MultiDevSSAGraphBuilder().count(pass_name) > 0;
}

std::unique_ptr<ir::Graph> BuildStrategy::Apply(
    const ProgramDesc &main_program, const std::vector<platform::Place> &places,
    const std::string &loss_var_name, const std::vector<Scope *> &local_scopes,
    const size_t &nranks,
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    const bool use_cuda, platform::NCCLContextMap *nccl_ctxs) const {
#else
    const bool use_cuda) const {
#endif
  // Create a default pass builder if the user has not finalized one.
  CreatePassesFromStrategy(false);

  std::unique_ptr<ir::Graph> graph(new ir::Graph(main_program));
  for (std::shared_ptr<ir::Pass> &pass : pass_builder_->AllPasses()) {
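    // Re-bind the per-invocation inputs (places, scopes, loss var, nranks)
    // on each multi-device pass before it runs.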
    if (IsMultiDevPass(pass->Type())) {
      pass->Erase(kPlaces);
      pass->SetNotOwned<const std::vector<platform::Place>>(kPlaces, &places);
      pass->Erase(kLossVarName);
      pass->SetNotOwned<const std::string>(kLossVarName, &loss_var_name);
      pass->Erase(kLocalScopes);
      pass->SetNotOwned<const std::vector<Scope *>>(kLocalScopes,
                                                    &local_scopes);
      pass->Erase(kNRanks);
      pass->Set<size_t>(kNRanks, new size_t(nranks));

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
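      // Hand the NCCL context map to the pass only when CUDA is actually in
      // use; otherwise the pass sees a null context.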
      platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr;
      pass->Erase("nccl_ctxs");
      pass->SetNotOwned<platform::NCCLContextMap>("nccl_ctxs", nctx);
#endif
    } else if (pass->Type() == "memory_optimize_pass") {
      if (graph->Has(kAllOpDescs)) {
        graph->Erase(kAllOpDescs);
      }
      const std::vector<OpDesc *> *all_op_descs =
          new std::vector<OpDesc *>(main_program.Block(0).AllOps());
      graph->Set<const std::vector<OpDesc *>>(kAllOpDescs,
                                              all_op_descs);  // take ownership

      pass->Erase(kAllOpDescs);
      pass->SetNotOwned<const std::vector<OpDesc *>>(kAllOpDescs, all_op_descs);

    } else if (pass->Type() == "sequential_execution_pass") {
      LOG(INFO) << "set enable_sequential_execution:"
                << enable_sequential_execution_;

      pass->Erase(kAllOpDescs);
      pass->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "all_reduce_deps_pass") {
      LOG(INFO) << "SeqOnlyAllReduceOps:" << SeqOnlyAllReduceOps(*this)
                << ", num_trainers:" << num_trainers_;

      pass->Erase(kAllOpDescs);
      pass->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "inplace_pass") {
      if (graph->Has(kAllOpDescs)) {
        graph->Erase(kAllOpDescs);
      }
      graph->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "fuse_relu_depthwise_conv_pass") {
      if (!use_cuda) {
        LOG(WARNING) << "fuse_relu_depthwise_conv_pass is only supported on "
                        "GPU, skipped.";
        continue;
      }
    }
    graph = pass->Apply(std::move(graph));
  }
  return graph;
}
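
// A hedged call sketch for Apply on a CUDA build (every variable below is a
// hypothetical caller-side value; the parameter order mirrors the signature
// above):
//
//   std::unique_ptr<ir::Graph> graph = strategy.Apply(
//       main_program, places, loss_var_name, local_scopes, nranks,
//       /*use_cuda=*/true, nccl_ctxs);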

}  // namespace details
}  // namespace framework
}  // namespace paddle

USE_PASS(fuse_relu_depthwise_conv_pass);
USE_PASS(fuse_elewise_add_act_pass);
USE_PASS(graph_viz_pass);
USE_PASS(multi_batch_merge_pass);
USE_PASS(reduce_mode_multi_devices_pass);
USE_PASS(allreduce_mode_multi_devices_pass);
USE_PASS(dist_multi_devices_pass);
USE_PASS(multi_devices_check_pass);
USE_PASS(multi_devices_print_pass);
USE_PASS(memory_optimize_pass);
USE_PASS(sequential_execution_pass);
USE_PASS(all_reduce_deps_pass);
USE_PASS(modify_op_lock_and_record_event_pass);
USE_PASS(inplace_pass);
USE_PASS(lock_free_optimize_pass);
USE_PASS(graph_to_program_pass);