// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/basic_engine.h"

#include <algorithm>
#include <memory>
#include <queue>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle/fluid/imperative/gradient_accumulator.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/op_base.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(sort_sum_gradient);

namespace paddle {
namespace imperative {

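// Init seeds the backward pass: it validates the (tensor, grad_tensor)
// pairs, fills each starting gradient (with ones when no grad tensor is
// given), registers a gradient accumulator for every starting grad var,
// and records the grad nodes from which Execute() will start traversal.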
void BasicEngine::Init(
    const std::vector<std::shared_ptr<VarBase>>& tensors,
    const std::vector<std::shared_ptr<VarBase>>& grad_tensors,
    bool retain_graph) {
  retain_graph_ = retain_graph;

  PADDLE_ENFORCE_EQ(
      tensors.size(), grad_tensors.size(),
      platform::errors::Unavailable(
          "The size of tensors does not equal the size of grad_tensors, "
          "the size of tensors is %s, but the size of grad_tensors is %s.",
          tensors.size(), grad_tensors.size()));

  PADDLE_ENFORCE_EQ(accumulators_.empty(), true,
                    platform::errors::AlreadyExists(
                        "Accumulators are not empty before preparing them for "
                        "backward network execution."));

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto var = tensors[i];
    auto grad_tensor = grad_tensors[i];

    auto init_node = var->GradVarBase()->GradNode();

    PADDLE_ENFORCE_EQ(
        var->GradVarBase()->GraphIsFreed(), false,
        platform::errors::Unavailable(
            "%s is trying to backward through the same graph a second "
            "time, but this graph has already been freed. Please "
            "specify Tensor.backward(retain_graph=True) when "
            "calling backward for the first time.",
            var->Name()));

    if (!retain_graph) {
      VLOG(5) << "Clear the auto-grad graph from grad var " << var->Name()
              << " because of retain_graph=False when calling backward";
      var->GradVarBase()->SetGraphIsFreed(true);
      var->GradVarBase()->ClearGradNode();
    }

    if (init_node == nullptr || var->OverridedStopGradient()) {
      VLOG(3) << "Skip autograd since there is no grad op for var, or the "
                 "loss has stop_gradient=True: "
              << var->Name();
      continue;
    }

    VLOG(3) << "Init node of backward";

    PADDLE_ENFORCE_EQ(
        var->HasGradVar(), true,
        platform::errors::NotFound("Tensor %s has no gradient", var->Name()));

    auto& fwd_var = var->Var().Get<framework::LoDTensor>();
    auto* grad_var =
        var->GradVarBase()->MutableVar()->GetMutable<framework::LoDTensor>();
    VLOG(6) << "init loss grad:" << var->GradVarBase()->Name()
            << " as stop_gradient false";
    var->GradVarBase()->InnerSetOverridedStopGradient(false);
    auto* dev_ctx =
        platform::DeviceContextPool::Instance().Get(fwd_var.place());
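    // Seed the starting gradient: if the caller passed no grad tensor,
    // fill the grad var with ones (d(out)/d(out) == 1); otherwise copy
    // the user-provided gradient into it.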
    if (grad_tensor == nullptr) {
      grad_var->Resize(fwd_var.dims());
      grad_var->mutable_data(fwd_var.place(), fwd_var.type());
      operators::math::set_constant(*dev_ctx, grad_var, 1.0);
    } else {
      paddle::framework::TensorCopy(
          grad_tensor->Var().Get<framework::LoDTensor>(), fwd_var.place(),
          *dev_ctx, grad_var);
    }

    VariableWrapper* init_grad_var = var->GradVarBase()->SharedVar().get();
    auto& accumulator = accumulators_[init_grad_var];
    if (!accumulator) {
      if (FLAGS_sort_sum_gradient) {
        accumulator.reset(new SortedGradientAccumulator(init_grad_var));
      } else {
        accumulator.reset(new EagerGradientAccumulator(init_grad_var));
      }
    }

    init_nodes_.push_back(init_node);
  }
}

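// CheckBackwardInputs zero-fills every grad input of `op` that no grad op
// has produced, so the grad kernel below can consume it safely.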
void BasicEngine::CheckBackwardInputs(const OpBase& op) {
  for (auto& pair : op.GetInsMap()) {
    if (!pair.second.IsGrad()) {
      continue;
    }

    for (auto& var : pair.second) {
      if (!var) {
        continue;
      }

      auto* inner_var = var->MutableVar();
      framework::Tensor* tensor = nullptr;
      if (!inner_var->IsInitialized() ||
          inner_var->IsType<framework::LoDTensor>()) {
        tensor = inner_var->GetMutable<framework::LoDTensor>();
      }

      if (tensor && !tensor->IsInitialized()) {
        auto* dev_ctx = platform::DeviceContextPool::Instance().Get(op.place());
        // NOTE(zhiqiu): since the grad variable has not been generated yet,
        // its dtype is not correct: var->DataType() returns the default
        // dtype, which is float32. Here, we use the corresponding forward
        // data type instead.

        tensor->mutable_data(op.place(), var->ForwardDataType());
        VLOG(6) << "Set ungenerated Grad: " << var->Name()
                << " as zero with dtype "
                << framework::DataTypeToString(var->ForwardDataType());
        operators::math::set_constant(*dev_ctx, tensor, 0.0);
      }
    }
  }
}

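// PrepareGradAccumulators registers one accumulator per output grad var of
// `op` and bumps its reference count, i.e. the number of grad ops that
// will write into that var. Vars without a grad node are keyed directly in
// accumulators_; vars whose grad node may have been rewritten by an
// inplace op are keyed by the grad_pending_node that consumes them, in
// accumulators_with_grad_node_.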
void BasicEngine::PrepareGradAccumulators(
    const OpBase& op,
    const std::vector<std::shared_ptr<GradOpNode>>& grad_pending_nodes) {
  for (const auto& pair : op.GetOutsMap()) {
    if (!pair.second.IsGrad()) {
      continue;
    }

    for (const auto& var : pair.second) {
      if (!var) continue;

      if (!var->HasGradNode()) {
        auto& accumulator = accumulators_[var.get()];
        if (!accumulator) {
          if (FLAGS_sort_sum_gradient) {
            accumulator.reset(new SortedGradientAccumulator(var.get()));
          } else {
            accumulator.reset(new EagerGradientAccumulator(var.get()));
          }
        }

        accumulator->IncreaseRefCnt();

        VLOG(3) << "Prepare to accumulate variable grad " << var->Name() << "("
                << var.get()
                << ") that doesn't have a grad node, with reference count "
                << accumulator->RefCnt();
      } else {
        // Because an inplace op overwrites the grad_node of its input
        // grad_var, only the grad_pending_node can be used to find the
        // grad_node of the grad_var.
        bool find_grad_node_of_var = false;
        for (auto& grad_pending_node : grad_pending_nodes) {
          PADDLE_ENFORCE_NOT_NULL(
              grad_pending_node,
              platform::errors::NotFound("Grad pending node is nullptr."));
          for (auto& grad_pending_op : *grad_pending_node) {
            VLOG(6) << "Determine whether var (" << var->Name()
                    << ") is the input var of grad_pending_op ("
                    << grad_pending_op.Type() << ").";
            grad_pending_op.EnforceHasInOut();
            for (const auto& grad_pending_op_ins_pair :
                 grad_pending_op.GetInsMap()) {
              if (!grad_pending_op_ins_pair.second.IsGrad()) {
                continue;
              }
              for (const auto& pending_in_var :
                   grad_pending_op_ins_pair.second) {
                if (var == pending_in_var) {
                  VLOG(6) << "Var (" << var->Name()
                          << ") is the input var of grad_pending_op ("
                          << grad_pending_op.Type() << ").";
                  find_grad_node_of_var = true;
                  break;
                }
              }
              if (find_grad_node_of_var) {
                break;
              }
            }
          }

          if (find_grad_node_of_var) {
            auto& accumulator =
                accumulators_with_grad_node_[grad_pending_node][var.get()];

            if (!accumulator) {
              if (FLAGS_sort_sum_gradient) {
                accumulator.reset(new SortedGradientAccumulator(var.get()));
              } else {
                accumulator.reset(new EagerGradientAccumulator(var.get()));
              }
            }

            accumulator->IncreaseRefCnt();

            VLOG(3) << "Prepare to accumulate variable grad " << var->Name()
                    << "(" << var.get()
                    << ") that has a grad node, with reference count "
                    << accumulator->RefCnt();
            break;
          }
        }
        PADDLE_ENFORCE_EQ(
            find_grad_node_of_var, true,
            platform::errors::NotFound(
                "No grad node corresponding to grad Tensor (%s) was found.",
                var->Name()));
      }
    }
  }
}

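// PrepareDeps walks the grad graph breadth-first from the init nodes,
// counting each node's in-degree into node_deps_ (consumed later by
// Execute() for topological ordering) and preparing the gradient
// accumulators of every op it visits.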
void BasicEngine::PrepareDeps() {
  PADDLE_ENFORCE_EQ(
      node_deps_.empty(), true,
      platform::errors::AlreadyExists("Op deps are not empty before preparing "
                                      "them for backward network execution."));
  PADDLE_ENFORCE_EQ(accumulators_with_grad_node_.empty(), true,
                    platform::errors::AlreadyExists(
                        "Accumulators with grad_node as the key are not empty "
                        "before preparing them for backward network "
                        "execution."));

  std::queue<GradOpNode*> q;
  std::unordered_set<GradOpNode*> visited;

  for (size_t i = 0; i < init_nodes_.size(); ++i) {
    q.push(init_nodes_[i].get());
    visited.insert(init_nodes_[i].get());
  }

  while (!q.empty()) {
    auto* cur_node = q.front();
    q.pop();

    const auto& grad_pending_nodes = cur_node->GradPendingNodes();

    for (auto& cur_op : *cur_node) {
      cur_op.EnforceHasInOut();
      PrepareGradAccumulators(cur_op, grad_pending_nodes);
    }

    for (auto& grad_pending_node : grad_pending_nodes) {
      PADDLE_ENFORCE_NOT_NULL(
          grad_pending_node,
          platform::errors::NotFound("Grad pending node is nullptr."));
      ++node_deps_[grad_pending_node.get()];
      if (visited.count(grad_pending_node.get()) == 0) {
        visited.insert(grad_pending_node.get());
        q.push(grad_pending_node.get());
      }
    }
  }
}

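// CallGradientHooks runs the user-registered hooks on the grad inputs of
// one grad op. The input map is copied lazily: tmp_ins_ptr stays null
// until the first hooked var is found, so hook-free ops pay nothing.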
static std::shared_ptr<NameVarMap<VariableWrapper>> CallGradientHooks(
    const NameVarMap<VariableWrapper>& bwd_ins, const std::string& op_type) {
  std::shared_ptr<NameVarMap<VariableWrapper>> tmp_ins_ptr = nullptr;
  for (const auto& pair : bwd_ins) {
    for (size_t i = 0; i < pair.second.size(); ++i) {
      auto& var = pair.second[i];
      if (var->HasVariableWrapperHook()) {
        if (tmp_ins_ptr == nullptr) {
          tmp_ins_ptr = std::make_shared<NameVarMap<VariableWrapper>>(bwd_ins);
        }
        VLOG(3) << "Call " << var->GetVariableWrapperHooks().size()
                << " hooks of " << op_type << "'s input `" << pair.first
                << "`'s var `" << var->Name() << "`.";
        auto tmp_var = var;
        for (const auto& hook_pair : var->GetVariableWrapperHooks()) {
          tmp_var = (*hook_pair.second)(tmp_var);
        }
        (*tmp_ins_ptr)[pair.first][i] = tmp_var;
      }
    }
  }
  return tmp_ins_ptr;
}

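// Execute drains the grad graph in topological order: a node is run only
// when every node it depends on has finished. A typical driver looks
// roughly like the sketch below (the real call site is the dygraph
// backward API, not this file):
//
//   BasicEngine engine;
//   engine.Init({loss}, {grad_of_loss_or_null}, /*retain_graph=*/false);
//   engine.Execute();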
void BasicEngine::Execute() {
  if (init_nodes_.empty()) {
    return;
  }

  PrepareDeps();
  // Start execute Computation graph
  std::queue<std::shared_ptr<GradOpNode>> q;
  for (size_t i = 0; i < init_nodes_.size(); ++i) {
    if (node_deps_[init_nodes_[i].get()] == 0) {
      q.push(std::move(init_nodes_[i]));
    }
  }

  size_t op_num = 0;

  while (!q.empty()) {
    auto shared_cur_node = std::move(q.front());
    q.pop();

    auto& inplace_grad_name_map = shared_cur_node->InplaceGradNameMap();

    for (auto& cur_op : *shared_cur_node) {
      platform::RecordEvent op_type_record_event(cur_op.Type());

      ++op_num;

      // Check backward inputs
      CheckBackwardInputs(cur_op);

      // Step 1: Run Backward OP
      auto& bwd_ins = cur_op.GetInsMap();
      auto& bwd_outs = cur_op.GetOutsMap();

      /**
       * [ Why need temporary outputs here? ]
       *
       * - construct the temp output map to avoid disrupting the graph
       * - replace the elements in the map by temp vars, because one var
       *   may correspond to several grad vars in one op
       */
      NameVarMap<VariableWrapper> tmp_outs(bwd_outs);

      for (auto& pair : tmp_outs) {
        if (!pair.second.IsGrad()) {
          continue;
        }

        for (auto& var : pair.second) {
          if (!var) {
            continue;
          }

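          // Locate this grad var's accumulator: leaf grads without a grad
          // node live in accumulators_; grads tied to a pending node live
          // in accumulators_with_grad_node_.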
          std::unordered_map<VariableWrapper*,
                             std::unique_ptr<GradientAccumulator>>::iterator
              iter;
          if (!var->HasGradNode()) {
            VLOG(10) << "Find gradient of var (" << var->Name()
                     << ") with no grad_node.";
            iter = accumulators_.find(var.get());
            PADDLE_ENFORCE_EQ(
                iter != accumulators_.end(), true,
                platform::errors::NotFound(
                    "Cannot find gradient of variable %s", var->Name()));
          } else {
            bool flag_find_grad = false;
            VLOG(10) << "Find gradient of var (" << var->Name()
                     << ") with grad_node.";
            for (auto& grad_pending_node :
                 shared_cur_node->GradPendingNodes()) {
              const auto& iter_grad_node =
                  accumulators_with_grad_node_.find(grad_pending_node);
              if (iter_grad_node != accumulators_with_grad_node_.end()) {
                iter = iter_grad_node->second.find(var.get());
                if (iter != iter_grad_node->second.end()) {
                  flag_find_grad = true;
                  break;
                }
              }
            }
            PADDLE_ENFORCE_EQ(
                flag_find_grad, true,
                platform::errors::NotFound(
                    "Cannot find gradient of variable %s", var->Name()));
          }

          // leaf_accumulators_: hooks and grad accumulation for leaf
          // tensors; entries must stay ordered and not repeated.
          if (var->IsLeafGrad()) {
            if (std::find(leaf_accumulators_.begin(), leaf_accumulators_.end(),
                          iter->second.get()) == leaf_accumulators_.end()) {
              leaf_accumulators_.push_back(iter->second.get());
            }

            if (iter->second->HasInnerVar()) {
              var = iter->second->InnerVar();
            }
          }

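          // If this grad var must keep its value (stop_gradient) or is
          // written by more than one grad op, redirect the op's output to
          // a temporary var and let the accumulator sum it in later.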
          if (var->OverridedStopGradient() || iter->second->RefCnt() > 1) {
            auto tmp_var = std::make_shared<VariableWrapper>(var->Name());
            tmp_var->SetType(var->Type());
            tmp_var->SetForwardDataType(var->ForwardDataType());
            var = tmp_var;
            need_accu_var_list_.emplace_back(iter->second.get(), var);
            VLOG(10) << "Create temporary var of " << var->Name()
                     << " to sum gradients within this graph!";
          } else if (!inplace_grad_name_map.empty() &&
                     inplace_grad_name_map.count(pair.first) &&
                     bwd_ins.count(inplace_grad_name_map.at(pair.first))) {
            // When running an inplace grad op, create a new output var.
            // If a tmp var has already been created, there is no need to
            // create it again.
            for (auto& in_var :
                 bwd_ins.at(inplace_grad_name_map.at(pair.first))) {
              if (in_var == var) {
                auto tmp_var = std::make_shared<VariableWrapper>(var->Name());
                tmp_var->SetType(var->Type());
                tmp_var->SetForwardDataType(var->ForwardDataType());
                inplace_output_grad_var_list_.emplace_back(var, tmp_var);
                var = tmp_var;
                VLOG(10) << "Inplace grad op does not use the Inplace "
                            "strategy, a temporary output var ("
                         << var->Name() << ") will be created.";
                break;
              }
            }
          }
        }
      }

      VLOG(4) << "Check whether there is any inplace operation affecting "
                 "gradient calculation.";
      for (auto& pair : bwd_ins) {
        for (auto& var_wrapper : pair.second) {
          auto wrapper_version_snapshot = var_wrapper->InplaceVersionSnapshot();
          auto tensor_version =
              var_wrapper->MutableVar()->CurrentInplaceVersion();
          PADDLE_ENFORCE_EQ(
              tensor_version, wrapper_version_snapshot,
              platform::errors::PermissionDenied(
                  "Tensor '%s' used in gradient computation in grad op '%s' "
                  "has been modified by an inplace operation. "
                  "Its version is %s but the expected version is %s. "
                  "Please fix your code to avoid calling an inplace operator "
                  "after using the Tensor which will be used in gradient "
                  "computation.",
                  var_wrapper->Name(), cur_op.Type(), tensor_version,
                  wrapper_version_snapshot));

          VLOG(6) << " The version of Tensor '" << var_wrapper->Name()
                  << "' is [ " << wrapper_version_snapshot << " ]";
        }
      }

      /**
       * [ Why need temporary inputs here? ]
       *
       * - Hook execution should not change the original input tensor.
       *   A user can register hooks on a Tensor's gradient; the hook is
       *   expected to affect only the gradient used in backward
       *   propagation, not the gradient value passed into the hook.
       * - Use `tmp_ins_ptr` and only copy bwd_ins when a var in bwd_ins
       *   holds hooks.
       */
      auto tmp_ins_ptr = CallGradientHooks(bwd_ins, cur_op.Type());

      {
        VLOG(3) << "Start to execute grad op " << cur_op.Type();
        try {
          if (tmp_ins_ptr == nullptr) {
            OpBase::Run(cur_op.InnerOp(), bwd_ins, tmp_outs, cur_op.Attrs(),
                        cur_op.DefaultAttrsMap(), cur_op.place());
          } else {
            OpBase::Run(cur_op.InnerOp(), *tmp_ins_ptr, tmp_outs,
                        cur_op.Attrs(), cur_op.DefaultAttrsMap(),
                        cur_op.place());
          }
        } catch (platform::EnforceNotMet& exception) {
          Clear();
          throw std::move(exception);
        } catch (std::exception& ex) {
          Clear();
          PADDLE_THROW(platform::errors::External("%s", ex.what()));
        }
      }

      for (auto& pair : inplace_output_grad_var_list_) {
        *pair.first = std::move(*pair.second);
      }

      // Step 2: Sum Gradient of This graph
      for (auto& pair : need_accu_var_list_) {
        pair.first->SumGrad(std::move(pair.second), cur_op.id());
      }

      // Step 3: Call Hooks && Sum Gradient with Pre-Graph && Call BackwardHooks
      for (auto* accumulator : leaf_accumulators_) {
        if (!accumulator->SumGradCompleted()) {
          continue;
        }
        // 1. Call Hooks for `inner_var_`
        accumulator->CallGradientHooks();

        // 2. Sum Gradient `inner_var_` to `var_` of Current or Previous Graph
        accumulator->AccumulateGrad();

        // 3. Call backward Hooks for `var_`
        accumulator->CallReduceHooks();
      }

      need_accu_var_list_.clear();
      inplace_output_grad_var_list_.clear();
      leaf_accumulators_.clear();

      if (!retain_graph_) {
        VLOG(3) << "Remove op after op " << cur_op.Type() << " runs";
        cur_op.ClearBackwardTrace();
      }
    }

    // Step 4: Collect ready ops
    for (auto& grad_pending_node : shared_cur_node->GradPendingNodes()) {
      PADDLE_ENFORCE_NOT_NULL(
          grad_pending_node,
          platform::errors::NotFound("Grad pending node is nullptr."));
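      // A pending node becomes ready only after every grad op feeding it
      // has run, i.e. when its dependency count drops to zero.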
      auto iter = node_deps_.find(grad_pending_node.get());
      if (iter == node_deps_.end()) {
        continue;
      }

      if (--(iter->second) == 0) {
        q.push(grad_pending_node);
      }
    }
  }
  Clear();

  VLOG(1) << "Backward op number: " << op_num;
}

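// Clear releases all per-run state so the engine can be reused (or safely
// destroyed) after a backward pass, whether it finished or threw.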
void BasicEngine::Clear() {
  init_nodes_.clear();
  node_deps_.clear();
  accumulators_.clear();
  accumulators_with_grad_node_.clear();
  need_accu_var_list_.clear();
  leaf_accumulators_.clear();
}

}  // namespace imperative
}  // namespace paddle