// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "paddle/fluid/imperative/backward_strategy.h"
#include "paddle/fluid/imperative/gradient_accumulator.h"
#include "paddle/fluid/imperative/layer.h"

namespace paddle {
namespace imperative {

// It seems there is no need for Engine to be a singleton; we can have
// multiple engines to run multiple graphs. For future use we may expose
// an interface to Python to support this.
class Engine {
 public:
  virtual ~Engine() = default;
  virtual void Execute() = 0;
  virtual void Init(VarBase* var, const detail::BackwardStrategy& strategy) = 0;
};

class BasicEngine : public Engine {
 public:
  void Init(VarBase* var, const detail::BackwardStrategy& strategy) override;

  void Execute() override;

 private:
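  // Walks the backward graph starting from init_ops_ and builds op_deps_,
  // the number of pending predecessor ops of each reachable op (inferred
  // from the member names; the definition lives in engine.cc).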
  void PrepareDeps();

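  // Checks the backward inputs of `op`; gradient inputs that are still
  // uninitialized are expected to be filled with zeros before the op runs.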
  void CheckBackwardInputs(OpBase* op);

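  // Registers a GradientAccumulator in accumulators_ for each gradient
  // output of `op` that does not have one yet.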
  void PrepareGradAccumulators(OpBase* op);

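  // Adds the gradient `src` into `dst` through the accumulator registered
  // for `dst`.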
  void SumGradient(OpBase* op, std::shared_ptr<VariableWrapper> src,
                   VariableWrapper* dst);

  // TODO(jiabin): maybe we can optimize the performance of the engine by
  // caching the result
  void Clear() {
    init_ops_.clear();
    op_deps_.clear();
    accumulators_.clear();
  }

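  // Gradient ops that seed the backward pass, collected in Init().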
  std::vector<std::shared_ptr<OpBase>> init_ops_;
  detail::BackwardStrategy backward_strategy_;
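  // Number of not-yet-executed predecessor ops of each op.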
  std::unordered_map<OpBase*, size_t> op_deps_;
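  // One accumulator per gradient variable, used to sum gradients that
  // arrive from multiple ops.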
  std::unordered_map<VariableWrapper*, std::unique_ptr<GradientAccumulator>>
      accumulators_;

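  // (dst, src) gradient pairs produced by the op currently being executed
  // that still have to be passed to SumGradient().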
  std::vector<std::pair<VariableWrapper*, std::shared_ptr<VariableWrapper>>>
      need_accu_var_list_;
};
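
// A minimal usage sketch (illustrative only; `loss` stands for a
// hypothetical VarBase* produced by a forward pass):
//
//   detail::BackwardStrategy strategy;
//   BasicEngine engine;
//   engine.Init(loss, strategy);  // seed the backward pass at `loss`
//   engine.Execute();             // run the gradient ops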

}  // namespace imperative
}  // namespace paddle