// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
#include "paddle/fluid/imperative/backward_strategy.h"
#include "paddle/fluid/imperative/gradient_accumulator.h"
#include "paddle/fluid/imperative/layer.h"

namespace paddle {
namespace imperative {

// It seems there is no need for Engine to be a
// singleton; we could have multiple engines running
// multiple graphs. For future use we may expose an
// interface to Python to support this.
class Engine {
 public:
  virtual ~Engine() = default;
  virtual void Execute() = 0;
  virtual void Init(VarBase* var, const detail::BackwardStrategy& strategy) = 0;
  virtual void RunOp(imperative::OpBase* op, const NameVarBaseMap& ins,
                     const NameVarBaseMap& outs, const platform::Place& place);

  virtual void RemoveOp(OpBase* op) {
    PADDLE_ENFORCE_NOT_NULL(op, "Cannot remove null op");
    auto iter = grad_ops_.find(op);
    PADDLE_ENFORCE_EQ(iter != grad_ops_.end(), true,
                      "Op is not registered in the engine");
    grad_ops_.erase(iter);
  }

  void InsertOp(OpBase* op, std::shared_ptr<OpBase> op_shared) {
    grad_ops_[op] = std::move(op_shared);
  }
  void Clear() { grad_ops_.clear(); }

 private:
  std::unordered_map<OpBase*, std::shared_ptr<OpBase>>
      grad_ops_;  // maps each grad OpBase* to its owning shared_ptr so
                  // RemoveOp can release it
};
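
// A minimal sketch of the op-ownership protocol above (hypothetical call
// site; the real callers live in the tracer, and the OpBase construction
// shown here is only illustrative):
//
//   std::shared_ptr<OpBase> op = /* created during tracing */;
//   engine->InsertOp(op.get(), op);  // engine keeps the grad op alive
//   ...
//   engine->RemoveOp(op.get());      // releases the engine's reference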

class BasicEngine : public Engine {
 public:
  BasicEngine() = default;

  void Init(VarBase* var, const detail::BackwardStrategy& strategy) override;

  ~BasicEngine() override = default;

  void Execute() override;

 private:
  void PrepareDeps();

  bool CheckBackwardInputs(OpBase* op);

  void PrepareGradAccumulators(OpBase* op);

  void SumGradient(OpBase* op, std::shared_ptr<VarBase> src, VarBase* dst);

  // TODO(jiabin): maybe we can optimize the performance of the engine by
  // caching the result
  void CleanEngine() {
    init_ops_.clear();
    op_deps_.clear();
    accumulators_.clear();
    Clear();
  }

  std::vector<OpBase*> init_ops_;
  detail::BackwardStrategy backward_strategy_;
  std::unordered_map<OpBase*, size_t> op_deps_;
  std::unordered_map<VarBase*, std::unique_ptr<GradientAccumulator>>
      accumulators_;
};
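
// A minimal sketch of how a backward pass might be driven (assumed usage;
// the actual entry point is the dygraph backward binding, and loss_var is
// a hypothetical VarBase* produced by a traced forward pass):
//
//   detail::BackwardStrategy strategy;
//   BasicEngine engine;
//   engine.Init(loss_var, strategy);  // seed from the output VarBase
//   engine.Execute();                 // run grad ops in dependency order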

}  // namespace imperative
}  // namespace paddle