// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/engine.h"

#include <algorithm>
#include <memory>
#include <queue>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "paddle/fluid/imperative/gradient_accumulator.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace imperative {

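// Run a single grad op on the given inputs/outputs, wrapping the call in a
// profiler RecordEvent so every backward op shows up in the timeline.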
void Engine::RunOp(paddle::imperative::OpBase* op,
                   const paddle::imperative::NameVarBaseMap& ins,
                   const paddle::imperative::NameVarBaseMap& outs,
                   const paddle::platform::Place& place) {
  platform::RecordEvent event(op->Type());

  op->Run(ins, outs);
}

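// Prepare the backward pass that starts from `var` (typically the loss):
// collect the grad ops recorded on its grad var, return early if the var
// stops gradient or has no grad op, and otherwise initialize its gradient
// tensor with ones as the starting upstream gradient.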
void BasicEngine::Init(VarBase* var, const detail::BackwardStrategy& strategy) {
  backward_strategy_ = strategy;
  const std::vector<OpBase*> ops = var->GradVarBase()->GradOps();
  var->ClearGradOps();

  if (ops.empty() || var->OverridedStopGradient()) {
    VLOG(3) << "Skip auto grad since there is no grad op for var or loss is "
               "stop_gradient=True: "
            << var->Name();
    return;
  } else {
    bool valid = false;
    for (const auto& op : ops) {
      if (op) {
        valid = true;
      }
    }
    if (!valid) {
      VLOG(3) << "Skip auto grad since all grad op of start VarBase is nullptr";
      return;
    }
  }
  init_ops_ = ops;
  platform::RecordEvent record_event("Imperative Backward");
  VLOG(3) << "start backward";

  PADDLE_ENFORCE_EQ(var->HasGradVar(), true,
                    "Grad variable does not exist for variable %s", var->Name());

  auto& fwd_var = var->Var().Get<framework::LoDTensor>();
  auto* grad_var =
      var->GradVarBase()->MutableVar()->GetMutable<framework::LoDTensor>();
  VLOG(6) << "init loss grad:" << var->GradVarBase()->Name()
          << " as stop_gradient false";
  var->GradVarBase()->InnerSetOverridedStopGradient(false);
  var->GradVarBase()->SetGradGenerated(true);
  auto* dev_ctx = platform::DeviceContextPool::Instance().Get(fwd_var.place());
  grad_var->Resize(fwd_var.dims());
  grad_var->mutable_data(fwd_var.place(), fwd_var.type());
  operators::math::set_constant(*dev_ctx, grad_var, 1.0);
}

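// Before a grad op runs, fill every grad input that no earlier backward op
// has generated with zeros, so the op never reads uninitialized gradients.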
void BasicEngine::CheckBackwardInputs(OpBase* op) {
  for (auto& pair : op->GetInsMap()) {
    for (auto& var : pair.second) {
      if (var && IsGrad(var.get())) {
        // Zero-fill grad inputs that have not been generated by an earlier op
        if (!var->GradGenerated()) {
          VLOG(6) << "Set ungenerated Grad: " << var->Name() << " as zero";
          auto* dev_ctx =
              platform::DeviceContextPool::Instance().Get(op->place());
          auto* tensor = var->MutableVar()->GetMutable<framework::LoDTensor>();
          tensor->mutable_data(op->place(), var->DataType());
          operators::math::set_constant(*dev_ctx, tensor, 0.0);
        } else {
          continue;
        }
      }
    }
  }
}

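// Mark every grad output of `op` as generated, so downstream grad ops treat
// these gradients as valid inputs instead of zero-filling them.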
void BasicEngine::SetBackwardOutputs(paddle::imperative::OpBase* op) {
  for (auto& pair : op->GetOutsMap()) {
    for (auto& var : pair.second) {
      if (var) {
        // Mark each backward output's grad as generated
        var->SetGradGenerated(true);
        VLOG(6) << "Set backward output " << var->Name()
                << "'s GradGenerated to true";
      }
    }
  }
}
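
// For each grad output of `op`, create a gradient accumulator on first use
// and increase its reference count; the count records how many grad ops will
// contribute to that variable's gradient during this backward pass.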
void BasicEngine::PrepareGradAccumulators(OpBase* op) {
  for (const auto& pair : op->GetOutsMap()) {
    for (const auto& var : pair.second) {
      if (!var) continue;

      auto& accumulator = accumulators_[var.get()];
      if (!accumulator) {
        if (backward_strategy_.sorted_sum_gradient_) {
          accumulator.reset(new SortedGradientAccumulator(var.get()));
        } else {
          accumulator.reset(new EagerGradientAccumulator(var.get()));
        }
      }

      accumulator->IncreaseRefCnt();

      VLOG(3) << "Prepare to acccumulate variable grad " << var->Name()
              << "with reference count " << accumulator->RefCnt();
    }
  }
}

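// Breadth-first traversal from the starting grad ops: zero-fill missing grad
// inputs, mark grad outputs as generated, build the gradient accumulators,
// and count each pending grad op's in-degree in op_deps_ so that Execute()
// can release ops in topological order.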
void BasicEngine::PrepareDeps() {
  PADDLE_ENFORCE_EQ(op_deps_.empty(), true, "Op deps must be initialized here");
  PADDLE_ENFORCE_EQ(accumulators_.empty(), true,
                    "Accumulators must be initialized here");

  std::queue<OpBase*> q;
  std::unordered_set<OpBase*> visited;
  for (const auto& init_op : init_ops_) {
    q.push(init_op);
    visited.insert(init_op);
  }

  while (!q.empty()) {
    auto* cur_op = q.front();
    q.pop();
    VLOG(3) << "Checking grads of op " << cur_op->Type();

    CheckBackwardInputs(cur_op);

    SetBackwardOutputs(cur_op);

    PrepareGradAccumulators(cur_op);

    auto& grad_pending_ops = cur_op->GradPendingOps();
    for (auto* grad_pending_op : grad_pending_ops) {
      PADDLE_ENFORCE_NOT_NULL(grad_pending_op);
      ++op_deps_[grad_pending_op];
      if (visited.count(grad_pending_op) == 0) {
        visited.insert(grad_pending_op);
        q.push(grad_pending_op);
      }
    }
  }
}

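// Merge a freshly computed gradient `src` into the accumulator that owns the
// real grad variable `dst`.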
void BasicEngine::SumGradient(OpBase* op, std::shared_ptr<VarBase> src,
                              VarBase* dst) {
  auto iter = accumulators_.find(dst);
  PADDLE_ENFORCE_EQ(iter != accumulators_.end(), true,
                    "Cannot find gradient of variable %s", dst->Name());
  iter->second->Add(std::move(src), op->id());
}
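
// Drive the backward pass: pop ready grad ops from the queue, run each one
// into temporary output vars, merge those temporaries into the real
// gradients, then decrease the dependency counts of downstream grad ops and
// push the ones that become ready.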
void BasicEngine::Execute() {
  PrepareDeps();
  // Start to execute the computation graph
  std::queue<OpBase*> q;
  for (const auto& init_op : init_ops_) {
    q.push(init_op);
  }
  while (!q.empty()) {
    OpBase* cur_op = q.front();
    q.pop();

    // Step 1: Run Backward
    auto& bwd_ins = cur_op->GetInsMap();
    auto& bwd_outs = cur_op->GetOutsMap();

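    // Redirect the op's grad outputs into temporary VarBases; the raw per-op
    // gradients are merged into the real grad vars by the accumulators below.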
    NameVarBaseMap tmp_outs;
    // A var may correspond to several grad vars in one op
    std::unordered_map<VarBase*, std::vector<std::shared_ptr<VarBase>>> var_map;
    size_t counter = 0;
    for (auto& bwd_out : bwd_outs) {
      auto& tmp_var_list = tmp_outs[bwd_out.first];
      tmp_var_list.reserve(bwd_out.second.size());
      for (auto& var : bwd_out.second) {
        auto tmp_var = std::make_shared<VarBase>(
            false, "Gtmp@" + std::to_string(counter++));  // Do not need grad
        tmp_var_list.emplace_back(tmp_var);
        if (var) {
          var_map[var.get()].emplace_back(std::move(tmp_var));
          var->ClearGradOps();
        }
      }
    }

    VLOG(3) << "Start to execute grad op " << cur_op->Type();
    RunOp(cur_op, bwd_ins, tmp_outs, cur_op->place());
    // Step 2: Sum Gradient
    {
      platform::RecordEvent record_event("merge_grads");
      for (auto& var_pair : var_map) {
        auto* dst_var = var_pair.first;
        if (dst_var == nullptr) continue;
        for (auto& src_var : var_pair.second) {
          VLOG(3) << "Sum gradient of variable " << dst_var->Name()
                  << " after op " << cur_op->Type();
          SumGradient(cur_op, std::move(src_var), dst_var);
        }
      }
    }

    // Step 3: Collect ready ops
    for (auto* grad_pending_op : cur_op->GradPendingOps()) {
      PADDLE_ENFORCE_NOT_NULL(grad_pending_op);
      auto iter = op_deps_.find(grad_pending_op);
      if (iter == op_deps_.end()) {
        continue;
      }

      VLOG(3) << "Found grad_pending op of " << cur_op->Type();
      // An op is ready to run once its dependency count reaches zero
      if (--(iter->second) == 0) {
        q.push(grad_pending_op);
        VLOG(3) << "Push grad_pending op " << grad_pending_op->Type()
                << " into queue";
      }
    }

    // Step 4: Remove the op so that its unused variables can be collected
    VLOG(3) << "Remove op after op " << cur_op->Type() << " runs";
    RemoveOp(cur_op);
  }
  VLOG(3) << "Clean properties of BasicEngine";
  CleanEngine();
}
}  // namespace imperative
}  // namespace paddle