//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/details/multi_devices_graph_builder.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/details/send_op_handle.h"
#include "paddle/fluid/framework/scope.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/framework/details/nccl_all_reduce_op_handle.h"
#endif

#include <algorithm>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace paddle {
namespace framework {
namespace details {

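// The builder is compiled in two variants: with CUDA it also receives an
// NCCLContextMap, used by the all-reduce and scale-loss-grad op handles for
// cross-device communication; the CPU-only variant omits it.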
#ifdef PADDLE_WITH_CUDA
MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
    const std::vector<platform::Place> &places,
    const std::string &loss_var_name,
    const std::unordered_set<std::string> &params,
    const std::vector<Scope *> &local_scopes, bool skip_scale_loss,
    platform::NCCLContextMap *nccl_ctxs)
    : loss_var_name_(loss_var_name),
      places_(places),
      local_scopes_(local_scopes),
      nccl_ctxs_(nccl_ctxs) {
#else
MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
    const std::vector<platform::Place> &places,
    const std::string &loss_var_name,
    const std::unordered_set<std::string> &params,
    const std::vector<Scope *> &local_scopes, bool skip_scale_loss)
    : loss_var_name_(loss_var_name),
      places_(places),
      local_scopes_(local_scopes) {
#endif
  for (auto &p : params) {
    grad_names_.insert(GradVarName(p));
  }
  skip_scale_loss_ = skip_scale_loss;
}

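// Wires up the most recently added op handle on place `place_id`: each input
// name is resolved to the latest VarHandle (created if absent), and a new
// SSA VarHandle version is created for each output.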
void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result,
                                                const OpDesc &op,
                                                size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->ops_.back().get();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (auto &each_var_name : op.InputArgumentNames()) {
    VarHandle *var =
        CreateOrGetLatestVarHandle(result, each_var_name, p, place_id);
    op_handle->AddInput(var);
  }

  for (auto &each_var_name : op.OutputArgumentNames()) {
    CreateOpOutput(result, op_handle, each_var_name, p, place_id);
  }
}

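// Returns true if `op` is a "split" or "concat" op that shares ".block"
// variables with the program's send op, i.e. it is part of the distributed
// training plumbing rather than the user's computation.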
bool MultiDevSSAGraphBuilder::IsDistTrainOp(const OpDesc &op,
                                            OpDesc *send_op) const {
  if (send_op == nullptr) {
    return false;
  }

  /**
   * Check whether any of `opvars` contains `.block` and also appears in
   * `sendvars`.
   */
  auto checker = [](const std::vector<std::string> &opvars,
                    const std::vector<std::string> &sendvars) -> bool {
    for (auto &var : opvars) {
      if (var.find(".block") != std::string::npos &&
          std::find(sendvars.begin(), sendvars.end(), var) != sendvars.end()) {
        return true;
      }
    }
    return false;
  };

  if (op.Type() == "split") {
    return checker(op.OutputArgumentNames(), send_op->InputArgumentNames());
  } else if (op.Type() == "concat") {
    return checker(op.InputArgumentNames(), send_op->OutputArgumentNames());
  }
  return false;
}

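// Builds the SSA graph: computational ops are replicated on every place,
// a scale-loss-grad op is inserted at the forward/backward boundary, and
// each parameter gradient is all-reduced across devices exactly once.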
std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build(
    const ProgramDesc &program) const {
  auto graph = new SSAGraph();
  SSAGraph &result = *graph;
  std::unordered_set<std::string> og_has_been_broadcast;

  // We cannot invoke resize() here; it triggers a bug in GCC 4.8.
  result.vars_ = std::vector<
      std::unordered_map<std::string, std::vector<std::unique_ptr<VarHandle>>>>(
      places_.size());

  // Find "send" op first for split is in front of send.
  OpDesc *send_op = GetSendOpDesc(program);

  bool is_forwarding = true;
  for (auto *op : program.Block(0).AllOps()) {
    if (op->Type() == "send") {
      // Append the send op when the program is a distributed trainer's main
      // program; it is always placed on the first device.
      CreateSendOp(&result, *op);
    } else if (IsDistTrainOp(*op, send_op)) {
      CreateComputationalOps(&result, *op, 1);
    } else if (IsScaleLossOp(*op)) {
      // The user can customize loss@grad if skip_scale_loss_ is set.
      if (!skip_scale_loss_) {
        CreateScaleLossGradOp(&result);
      }
      is_forwarding = false;
    } else {
      CreateComputationalOps(&result, *op, places_.size());
      if (!is_forwarding) {
        // Currently, we assume that once a gradient is generated, it can be
        // broadcast, and each gradient is only broadcast once.
        for (auto &og : op->OutputArgumentNames()) {
          if (IsParameterGradientOnce(og, &og_has_been_broadcast)) {
            InsertNCCLAllReduceOp(&result, og);
          }
        }
      }
    }
  }

  /*
    Dependency graph has been constructed. However, there are still data
    hazards that need to be handled.
   */
  PolishGraphToSupportDataHazards(&result);

  /*
   * Only variables should be the leaves of the graph.
   */
  AddOutputToLeafOps(&result);

  if (VLOG_IS_ON(10)) {
    std::ostringstream sout;
    PrintGraphviz(*graph, sout);
    VLOG(10) << sout.str();
  }

  return std::unique_ptr<SSAGraph>(graph);
}

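// Returns the "send" op of block 0, or nullptr if the program is not a
// distributed trainer main program.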
OpDesc *MultiDevSSAGraphBuilder::GetSendOpDesc(
    const ProgramDesc &program) const {
  for (auto *op : program.Block(0).AllOps()) {
    if (op->Type() == "send") {
      return op;
    }
  }
  return nullptr;
}

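// Inserts an NCCL all-reduce op handle for gradient `og`: it consumes the
// latest version of `og` on every place and produces a new version on each.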
void MultiDevSSAGraphBuilder::InsertNCCLAllReduceOp(
    SSAGraph *result, const std::string &og) const {
#ifdef PADDLE_WITH_CUDA
  result->ops_.emplace_back(
      new NCCLAllReduceOpHandle(local_scopes_, places_, *nccl_ctxs_));
  auto *op_handle = result->ops_.back().get();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    auto &vars = result->vars_[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());

    auto var = new VarHandle(vars.size() - 1, i, og, p);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
#else
  // PADDLE_ENFORCE on a bare string literal always evaluates to true and
  // would silently pass; throw explicitly so a CPU-only build fails fast.
  PADDLE_THROW("Not implemented");
#endif
}

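// Returns true the first time a parameter gradient is seen, and records it
// so that each gradient is broadcast (all-reduced) only once.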
bool MultiDevSSAGraphBuilder::IsParameterGradientOnce(
    const std::string &og,
    std::unordered_set<std::string> *og_has_been_broadcast) const {
  bool is_pg_once =
      grad_names_.count(og) != 0 && og_has_been_broadcast->count(og) == 0;
  if (is_pg_once) {
    // Insert NCCL AllReduce Op
    og_has_been_broadcast->insert(og);
  }
  return is_pg_once;
}

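// Creates one scale-loss-grad op handle per place; each writes the initial
// loss gradient (scaled by device_count) as loss@grad in its local scope.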
void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(SSAGraph *result) const {
  for (size_t i = 0; i < places_.size(); ++i) {
// Insert ScaleCost OpHandle
#ifdef PADDLE_WITH_CUDA
    auto *communication_dev_ctx = nccl_ctxs_->DevCtx(places_[i]);
#else
    auto *communication_dev_ctx =
        platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
#endif

    auto *op_handle =
        new ScaleLossGradOpHandle(local_scopes_.size(), local_scopes_[i],
                                  places_[i], communication_dev_ctx);
    result->ops_.emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only uses device_count as the scale
    // factor, so it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(result, op_handle, GradVarName(loss_var_name_), places_[i],
                   i);
  }
}

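// Replicates `op` as a computation op handle on the first `num_places`
// places, one per local scope.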
void MultiDevSSAGraphBuilder::CreateComputationalOps(SSAGraph *result,
                                                     const OpDesc &op,
                                                     size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->ops_.emplace_back(new ComputationOpHandle(op, s, p));
    CreateOpHandleIOs(result, op, scope_idx);
  }
}

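// Creates a single send op handle, pinned to the first place and scope.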
void MultiDevSSAGraphBuilder::CreateSendOp(SSAGraph *result,
                                           const OpDesc &op) const {
  auto &p = places_[0];
  auto *s = local_scopes_[0];
  // FIXME(wuyi): send op always copy from GPU 0
  result->ops_.emplace_back(new SendOpHandle(op, s, p));
  // Create inputs for the output on the original place; no SSA output
  // is created for the send op.
  CreateOpHandleIOs(result, op, 0);
}

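// Heuristic: an op whose sole output is loss@grad is treated as the op that
// scales the loss, i.e. the boundary between forward and backward passes.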
bool MultiDevSSAGraphBuilder::IsScaleLossOp(const OpDesc &op) const {
  // FIXME(yy): Do not hard code like this
  return op.OutputArgumentNames().size() == 1 &&
         op.OutputArgumentNames()[0] == GradVarName(loss_var_name_);
}
}  // namespace details
}  // namespace framework
}  // namespace paddle