// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <list>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "lite/core/kernel.h"
#include "lite/core/op_lite.h"

namespace paddle {
namespace lite {
namespace mir {

// Node in a MIR graph.
class Node {
 public:
  std::list<Node*> inlinks;
  std::list<Node*> outlinks;

  Node() = default;

  enum class Role {
    kArg = 0,
    kStmt,
    kNumRoles,  // number of valid roles; keep after all valid roles
    kUnk,       // sentinel: role not set yet
  };

  class Stmt {
    // The kernel instances this Statement contains.
    std::vector<std::unique_ptr<KernelBase>> valid_kernels_;
    // The op is held (as a shared_ptr for resource safety) so that
    // InferShape can be re-run.
    std::shared_ptr<OpLite> op_;

   public:
    // Refresh the operator and kernels with the latest OpInfo.
    void ResetOp(const cpp::OpDesc& op_desc,
                 const std::vector<Place>& valid_places,
                 lite::Scope* scope = nullptr);

    void ResetKernels(const std::vector<Place>& valid_places);
    std::string op_type() const { return op_info()->Type(); }
    const OpInfo* op_info() const;
    OpInfo* mutable_op_info();

    void SetKernels(std::vector<std::unique_ptr<KernelBase>>&& kernels) {
      valid_kernels_ = std::move(kernels);
    }
    std::vector<std::unique_ptr<KernelBase>>& kernels() {
      return valid_kernels_;
    }

    void SetOp(const std::shared_ptr<OpLite>& op) { op_ = op; }
    std::shared_ptr<OpLite> op() const { return op_; }

    Place place() const;

    KernelBase& picked_kernel();

    friend std::ostream& operator<<(std::ostream& os, const Stmt& other) {
      os << "Statement " << other.op_type() << " "
         << other.place().DebugString();
      return os;
    }

    // Description.
    std::string desc;

    // For CUDA multi-stream execution.
    bool need_sync_{false};
    int stream_id_{0};
    // Streams that need to be synchronized, excluding stream_id_.
    std::vector<int> sync_streams_{};
  };
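
  // A minimal usage sketch for Stmt (illustrative only; `node`, `op_desc`,
  // and `valid_places` are assumed to be supplied by the calling pass):
  //
  //   Node* node = ...;                     // a statement node in the graph
  //   auto& stmt = node->AsStmt();
  //   stmt.ResetOp(op_desc, valid_places);  // rebuild op and kernels
  //   LOG(INFO) << "runs on " << stmt.place().DebugString();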

  struct Arg {
    std::string name;
    int id{0};
    const Type* type{nullptr};
    // Weight is a special kind of argument; it is marked as a weight
    // explicitly so that weight-related optimizations can take place.
    bool is_weight{false};
    // is_persist indicates whether this argument is produced when
    // transforming a weight. If more than one utility operator is needed
    // (e.g. io_copy, layout, calib), the argument between them should be
    // persistent to make sure the transformation runs only once.
    bool is_persist{false};
    int lane{-1};
  };

  Arg& AsArg(const std::string& name, int id);

  Arg& AsArg(const std::string& name);
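
  // Sketch: marking an argument node that carries weights (illustrative;
  // `weight_type` is a hypothetical `const Type*` produced elsewhere, e.g.
  // by type inference):
  //
  //   Node node;
  //   auto& arg = node.AsArg("conv1_weight", /*id=*/3);
  //   arg.is_weight = true;  // opt in to weight-related optimizations
  //   arg.type = weight_type;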

  Stmt& AsStmt(const std::string& op_type,
               std::vector<std::unique_ptr<KernelBase>>&& kernels,
               const std::shared_ptr<OpLite>& op) {
    auto& x = AsStmt();
    x.SetOp(op);
    x.SetKernels(std::move(kernels));
    return x;
  }

  Stmt* stmt() const {
    CHECK(IsStmt());
    return stmt_.get();
  }

  Arg* arg() const {
    CHECK(IsArg());
    return arg_.get();
  }

  // Set roles.
  Arg& AsArg() {
    if (role_ != Role::kUnk) {
      CHECK(role_ == Role::kArg);
      return *arg_;
    }
    role_ = Role::kArg;
    arg_.reset(new Arg);
    return *arg_;
  }
  Stmt& AsStmt() {
    if (role_ != Role::kUnk) {
      CHECK(role_ == Role::kStmt);
      return *stmt_;
    }
    role_ = Role::kStmt;
    stmt_.reset(new Stmt);
    return *stmt_;
  }
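
  // Role-assignment sketch (illustrative; `op` and `kernels` would come from
  // a graph-construction pass):
  //
  //   Node stmt_node, arg_node;
  //   stmt_node.AsStmt("conv2d", std::move(kernels), op);
  //   arg_node.AsArg("conv2d_out", /*id=*/0);
  //   // Wire the edge Statement -> Argument in both directions.
  //   stmt_node.outlinks.push_back(&arg_node);
  //   arg_node.inlinks.push_back(&stmt_node);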

  friend std::ostream& operator<<(std::ostream& os, Node& other) {
    os << static_cast<int>(other.role_) << " ";
    if (!other.IsRoleSet()) {
      os << "Unk role node";
    }
    if (other.IsArg()) {
      auto& arg = other.AsArg();
      os << "Argument " << arg.name;
    }
    if (other.IsStmt()) {
      auto& stmt = other.AsStmt();
      os << "Statement " << stmt.op_type();
    }
    return os;
  }

  // Check roles.
  bool IsRoleSet() const { return role_ != Role::kUnk; }
  bool IsStmt() const { return role_ == Role::kStmt; }
  bool IsArg() const { return role_ == Role::kArg; }
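
  // Traversal sketch using the role checks (illustrative):
  //
  //   for (Node* in : node->inlinks) {
  //     if (in->IsArg() && in->arg()->is_weight) {
  //       // e.g. handle the weight argument here
  //     }
  //   }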

 private:
  // Either stmt_ or arg_ is used.
  std::unique_ptr<Stmt> stmt_;
  std::unique_ptr<Arg> arg_;
  Role role_{Role::kUnk};
};
}  // namespace mir
}  // namespace lite
}  // namespace paddle