/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/details/build_strategy.h"

#include <glog/logging.h>
#include <memory>

#include "paddle/fluid/framework/details/memory_optimize_helper.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/sequential_execution_pass.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/graph_to_program_pass.h"
#include "paddle/fluid/framework/ir/graph_viz_pass.h"

namespace paddle {
namespace framework {
namespace details {

static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) {
  // The allreduce op order must be fixed when the ops are scheduled
  // across multiple threads or processes, otherwise execution may hang.
  // NOTE: ParallelGraph executes this pass on each sub-graph, so there
  // is no need to append it here.
  return (!strategy.enable_sequential_execution_ &&
          strategy.num_trainers_ > 1) &&
         !strategy.enable_parallel_graph_;
}
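
// A minimal usage sketch (illustrative only; the field values below are
// assumptions, not taken from a real configuration):
//
//   BuildStrategy s;
//   s.enable_sequential_execution_ = false;
//   s.num_trainers_ = 2;               // multi-trainer job
//   s.enable_parallel_graph_ = false;
//   bool fix_order = SeqOnlyAllReduceOps(s);  // true: the builder below will
//                                             // append all_reduce_deps_pass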

class ParallelExecutorPassBuilder : public ir::PassBuilder {
 public:
  explicit ParallelExecutorPassBuilder(const BuildStrategy &strategy)
      : ir::PassBuilder(), strategy_(strategy) {
    if (strategy_.enable_sequential_execution_) {
      AppendPass("sequential_execution_pass");
    }

    // Add op fusion.
    if (strategy.fuse_relu_depthwise_conv_) {
      AppendPass("fuse_relu_depthwise_conv_pass");
    }

    // NOTE(dzhwinter): Notes on automatic inplace:
    // 1. Passes that modify the program desc should be placed
    //    before the inplace pass.
    // 2. Manually configured inplace ops should also be placed
    //    before inplace_pass.

    // Add automatic inplace.
    if (strategy_.enable_inplace_) {
      AppendPass("inplace_pass");
    }

    // Add a graph viz pass to record a graph.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto viz_pass = AppendPass("graph_viz_pass");
      const std::string graph_path = string::Sprintf(
          "%s%s", strategy_.debug_graphviz_path_.c_str(), "_original_graph");
      viz_pass->Set<std::string>("graph_viz_path", new std::string(graph_path));
    }

    if (strategy.fuse_elewise_add_act_ops_) {
      AppendPass("fuse_elewise_add_act_pass");
      // Add a graph viz pass to record a graph.
      if (!strategy.debug_graphviz_path_.empty()) {
        auto viz_pass = AppendPass("graph_viz_pass");
        const std::string graph_path = string::Sprintf(
            "%s%s", strategy.debug_graphviz_path_.c_str(), "_fused_graph");
        viz_pass->Set<std::string>("graph_viz_path",
                                   new std::string(graph_path));
      }
    }

    CollectiveContext *context = CollectiveContext::GetInstance();
    context->endpoints_ = strategy_.trainers_endpoints_;
    context->trainer_id_ = strategy_.trainer_id_;
    PADDLE_ENFORCE(strategy_.trainer_id_ >= 0, "trainer_id_ >= 0");
    if (strategy_.trainer_id_ > 0 && strategy_.trainers_endpoints_.size() > 0) {
      PADDLE_ENFORCE((unsigned)(strategy_.trainer_id_) <
                         strategy_.trainers_endpoints_.size(),
                     "trainer_id_ < endpoints_ size");
    }
    VLOG(1) << "CollectiveContext:" << context->String();

    // NOTE(dzh): Memory optimization should ideally be a runtime pass.
    // However, after multi_devices_pass, VarHandle and OpHandle become the
    // de facto IR, so any reuse applied to the Graph at that point is
    // meaningless. As a side effect, memory optimization cannot foresee the
    // fetched vars, so the fetch list should be set persistable before
    // calling the Run interface.
    if (strategy.memory_optimize_) {
      AppendPass("memory_optimize_pass");
    }

    AppendMultiDevPass(strategy);

    // Add a graph print pass to record a graph with device info.
    if (!strategy_.debug_graphviz_path_.empty()) {
      auto multi_devices_print_pass = AppendPass("multi_devices_print_pass");
      const std::string graph_path =
          string::Sprintf("%s%s", strategy_.debug_graphviz_path_.c_str(),
                          "_multi_devices_graph");
      multi_devices_print_pass->Set<std::string>(kGraphvizPath,
                                                 new std::string(graph_path));
      multi_devices_print_pass->Set<details::GraphvizSSAGraphPrinter>(
          "graph_printer", new details::GraphvizSSAGraphPrinter);
    }

    // Verify that the graph is correct for the multi-device executor.
    AppendPass("multi_devices_check_pass");

    if (SeqOnlyAllReduceOps(strategy)) {
      AppendPass("all_reduce_deps_pass");
    }

    if (strategy_.remove_unnecessary_lock_) {
      AppendPass("modify_op_lock_and_record_event_pass");
    }
  }

  // Convert the graph to run on multiple devices.
  void AppendMultiDevPass(const BuildStrategy &strategy) {
    ir::Pass *multi_devices_pass;
    if (strategy_.is_distribution_) {
      VLOG(3) << "multi device parameter server mode";
      multi_devices_pass = AppendPass("dist_multi_devices_pass").get();
    } else {
      if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
        VLOG(3) << "multi devices collective mode with allreduce";
        multi_devices_pass =
            AppendPass("allreduce_mode_multi_devices_pass").get();
      } else if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kReduce) {
        VLOG(3) << "multi deivces collective mode with reduce";
        multi_devices_pass = AppendPass("reduce_mode_multi_devices_pass").get();
      } else {
        PADDLE_THROW("Unknown reduce strategy.");
      }
    }
    multi_devices_pass->SetNotOwned<const BuildStrategy>("strategy",
                                                         &strategy_);
  }
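
  // Illustration (strategy values assumed): is_distribution_ == true selects
  // "dist_multi_devices_pass"; otherwise reduce_ == kAllReduce selects
  // "allreduce_mode_multi_devices_pass", reduce_ == kReduce selects
  // "reduce_mode_multi_devices_pass", and any other value throws.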

 private:
  BuildStrategy strategy_;
};

std::shared_ptr<ir::PassBuilder> BuildStrategy::CreatePassesFromStrategy(
    bool finalize_strategy) const {
  if (is_finalized_) {
    return pass_builder_;
  }
  pass_builder_.reset(new ParallelExecutorPassBuilder(*this));
  if (finalize_strategy) {
    is_finalized_ = true;
  }
  return pass_builder_;
}
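
// A minimal usage sketch (hedged; "graph_viz_pass" is registered via USE_PASS
// at the end of this file):
//
//   BuildStrategy strategy;
//   auto builder = strategy.CreatePassesFromStrategy(/*finalize_strategy=*/true);
//   builder->AppendPass("graph_viz_pass");  // append one extra pass
//   // Later calls to CreatePassesFromStrategy return this same builder.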

bool BuildStrategy::IsMultiDevPass(const std::string &pass_name) const {
  return framework::details::MultiDevSSAGraphBuilder().count(pass_name) > 0;
}

std::unique_ptr<ir::Graph> BuildStrategy::Apply(
    const ProgramDesc &main_program, const std::vector<platform::Place> &places,
    const std::string &loss_var_name, const std::vector<Scope *> &local_scopes,
    const size_t &nranks,
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    const bool use_cuda, platform::NCCLContextMap *nccl_ctxs) const {
#else
    const bool use_cuda) const {
#endif
  VLOG(3) << "apply all passes";
  // Create a default pass builder if the user has not finalized one.
  CreatePassesFromStrategy(false);

  std::unique_ptr<ir::Graph> graph(new ir::Graph(main_program));
  for (std::shared_ptr<ir::Pass> &pass : pass_builder_->AllPasses()) {
    VLOG(3) << "apply " << pass->Type();
    if (IsMultiDevPass(pass->Type())) {
      pass->Erase(kPlaces);
      pass->SetNotOwned<const std::vector<platform::Place>>(kPlaces, &places);
      pass->Erase(kLossVarName);
      pass->SetNotOwned<const std::string>(kLossVarName, &loss_var_name);
      pass->Erase(kLocalScopes);
      pass->SetNotOwned<const std::vector<Scope *>>(kLocalScopes,
                                                    &local_scopes);
      pass->Erase(kNRanks);
      pass->Set<size_t>(kNRanks, new size_t(nranks));

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr;
      pass->Erase("nccl_ctxs");
      pass->SetNotOwned<platform::NCCLContextMap>("nccl_ctxs", nctx);
#endif
    } else if (pass->Type() == "memory_optimize_pass") {
      if (graph->Has(kAllOpDescs)) {
        graph->Erase(kAllOpDescs);
      }
      const std::vector<OpDesc *> *all_op_descs =
          new std::vector<OpDesc *>(main_program.Block(0).AllOps());
      graph->Set<const std::vector<OpDesc *>>(kAllOpDescs,
                                              all_op_descs);  // take ownership

      pass->Erase(kAllOpDescs);
      pass->SetNotOwned<const std::vector<OpDesc *>>(kAllOpDescs, all_op_descs);

    } else if (pass->Type() == "sequential_execution_pass") {
      LOG(INFO) << "set enable_sequential_execution:"
                << enable_sequential_execution_;

      pass->Erase(kAllOpDescs);
      pass->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "all_reduce_deps_pass") {
      LOG(INFO) << "SeqOnlyAllReduceOps:" << SeqOnlyAllReduceOps(*this)
                << ", num_trainers:" << num_trainers_;

      pass->Erase(kAllOpDescs);
      pass->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "inplace_pass") {
      if (graph->Has(kAllOpDescs)) {
        graph->Erase(kAllOpDescs);
      }
      graph->Set<const std::vector<OpDesc *>>(
          kAllOpDescs,
          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
    } else if (pass->Type() == "fuse_relu_depthwise_conv_pass") {
      if (!use_cuda) {
        LOG(WARNING) << "fuse_relu_depthwise_conv_pass is only supported on "
                        "GPU, skipped.";
        continue;
      }
    }
    VLOG(3) << "Start Apply Pass " << pass->Type();
    graph = pass->Apply(std::move(graph));
    VLOG(3) << "Finish Apply Pass " << pass->Type();
  }
  return graph;
}
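
// An end-to-end sketch of Apply (the program, place, and scope setup are
// assumptions for illustration, not taken from this file):
//
//   ProgramDesc main_program;  // built by the user
//   std::vector<platform::Place> places = {platform::CUDAPlace(0)};
//   Scope scope;
//   std::vector<Scope *> local_scopes = {&scope};
//   BuildStrategy strategy;
//   #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
//   platform::NCCLContextMap *nccl_ctxs = /* provided by the executor */;
//   auto graph = strategy.Apply(main_program, places, "loss", local_scopes,
//                               /*nranks=*/1, /*use_cuda=*/true, nccl_ctxs);
//   #else
//   auto graph = strategy.Apply(main_program, places, "loss", local_scopes,
//                               /*nranks=*/1, /*use_cuda=*/false);
//   #endif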

}  // namespace details
}  // namespace framework
}  // namespace paddle

USE_PASS(fuse_relu_depthwise_conv_pass);
USE_PASS(fuse_elewise_add_act_pass);
USE_PASS(graph_viz_pass);
USE_PASS(multi_batch_merge_pass);
USE_PASS(reduce_mode_multi_devices_pass);
USE_PASS(allreduce_mode_multi_devices_pass);
USE_PASS(dist_multi_devices_pass);
USE_PASS(multi_devices_check_pass);
USE_PASS(multi_devices_print_pass);
USE_PASS(memory_optimize_pass);
USE_PASS(sequential_execution_pass);
USE_PASS(all_reduce_deps_pass);
USE_PASS(modify_op_lock_and_record_event_pass);
USE_PASS(inplace_pass);
USE_PASS(lock_free_optimize_pass);
USE_PASS(graph_to_program_pass);