// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "paddle/fluid/imperative/backward_strategy.h"
#include "paddle/fluid/imperative/gradient_accumulator.h"
#include "paddle/fluid/imperative/layer.h"

namespace paddle {
namespace imperative {

// It seems there is no need for Engine to be a
// singleton; we can have multiple engines to run
// multiple graphs. For future use we may expose an
// interface to Python to support this.
class Engine {
 public:
  virtual ~Engine() = default;
  virtual void Execute() = 0;
  virtual void Init(VarBase* var, const detail::BackwardStrategy& strategy) = 0;
  virtual void RunOp(imperative::OpBase* op, const NameVarBaseMap& ins,
                     const NameVarBaseMap& outs, const platform::Place& place);

  virtual void RemoveOp(OpBase* op) {
    PADDLE_ENFORCE_NOT_NULL(op, "Cannot remove null op");
    auto iter = grad_ops_.find(op);
    PADDLE_ENFORCE_EQ(iter != grad_ops_.end(), true, "Op is not inside tracer");
    grad_ops_.erase(iter);
  }

  void InsertOp(OpBase* op, std::shared_ptr<OpBase> op_shared) {
    grad_ops_[op] = std::move(op_shared);
  }

  const std::unordered_set<VarBase*>& GradVars() const { return grad_vars_; }

  const std::unordered_map<OpBase*, std::shared_ptr<OpBase>>& GradOps() const {
    return grad_ops_;
  }

  void InsertGradVar(VarBase* grad) { grad_vars_.emplace(grad); }

  bool IsGrad(VarBase* var) { return grad_vars_.count(var) > 0; }

  void Clear() {
    grad_ops_.clear();
    grad_vars_.clear();
  }

 private:
  // Raw OpBase* (used as the key for RemoveOp) -> shared ownership of the
  // corresponding grad op.
  std::unordered_map<OpBase*, std::shared_ptr<OpBase>> grad_ops_;
  std::unordered_set<VarBase*> grad_vars_;
};
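
// Minimal usage sketch of the Engine bookkeeping API above (hypothetical
// variable names; assumes `op` is an OpBase* and `op_shared` is the
// std::shared_ptr<OpBase> that owns it):
//
//   engine->InsertOp(op, op_shared);    // keep the grad op alive
//   engine->InsertGradVar(grad_var);    // mark grad_var as a gradient var
//   if (engine->IsGrad(grad_var)) { /* ... */ }
//   engine->RemoveOp(op);               // release ownership of the grad op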

class BasicEngine : public Engine {
 public:
  BasicEngine() = default;

  void Init(VarBase* var, const detail::BackwardStrategy& strategy) override;

  ~BasicEngine() override = default;

  void Execute() override;

 private:
  void PrepareDeps();

  void CheckBackwardInputs(OpBase* op);

  void SetBackwardOutputs(OpBase* op);

  void PrepareGradAccumulators(OpBase* op);

  void SumGradient(OpBase* op, std::shared_ptr<VarBase> src, VarBase* dst);

  // TODO(jiabin): maybe we can optimize the performance of the engine by
  // caching the result
  void CleanEngine() {
    init_ops_.clear();
    op_deps_.clear();
    accumulators_.clear();
    Clear();
  }

  std::vector<OpBase*> init_ops_;
  detail::BackwardStrategy backward_strategy_;
  std::unordered_map<OpBase*, size_t> op_deps_;
  std::unordered_map<VarBase*, std::unique_ptr<GradientAccumulator>>
      accumulators_;

  std::vector<std::pair<VarBase*, std::shared_ptr<VarBase>>>
      need_accu_var_list_;
};
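
// Minimal backward-pass sketch using the BasicEngine interface declared
// above (hypothetical; assumes `loss` is the VarBase* to start the backward
// pass from and that the grad ops have already been recorded):
//
//   detail::BackwardStrategy strategy;
//   BasicEngine engine;
//   engine.Init(loss, strategy);  // seed the engine with the starting var
//   engine.Execute();             // run the recorded grad ops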

}  // namespace imperative
}  // namespace paddle