// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <typeinfo>
#include <unordered_map>
#include <utility>
#include <vector>

#include "graph/op/all_ops.h"
#include "lite/core/op_lite.h"
#include "lite/core/tensor.h"

namespace paddle {
namespace lite {
namespace subgraph {
namespace npu {

31 32
// Graph and node is defined to collect all of converted HiAI IR nodes
class Node {
33
 public:
34 35 36 37 38 39 40 41 42 43 44 45 46 47 48
  enum class Role {
    kVar = 0,
    kConst,
    kData,
  };

  Node(std::shared_ptr<ge::Operator> data,
       PrecisionType precision,
       DataLayoutType layout,
       Role role)
      : data_(data), precision_(precision), layout_(layout), role_(role) {}
  Node(PrecisionType precision, DataLayoutType layout, Role role)
      : precision_(precision), layout_(layout), role_(role) {}

  void set_data(std::shared_ptr<ge::Operator> data) { data_ = data; }
49 50
  void set_precision(PrecisionType precision) { precision_ = precision; }
  void set_layout(DataLayoutType layout) { layout_ = layout; }
51
  void set_role(Role role) { role_ = role; }
52

53 54 55 56 57
  template <typename T>
  std::shared_ptr<T> data() {
    return std::static_pointer_cast<T>(data_);
  }
  std::shared_ptr<ge::Operator> data() { return data_; }
58 59
  PrecisionType precision() const { return precision_; }
  DataLayoutType layout() const { return layout_; }
60 61 62
  bool is_var() const { return role_ == Role::kVar; }
  bool is_const() const { return role_ == Role::kConst; }
  bool is_data() const { return role_ == Role::kData; }
63 64

 private:
65
  std::shared_ptr<ge::Operator> data_{nullptr};
66 67
  PrecisionType precision_{PRECISION(kFloat)};
  DataLayoutType layout_{DATALAYOUT(kNCHW)};
68
  Role role_{Role::kVar};
69 70
};

71 72
class Graph {
 public:
73 74 75
  int Add(const std::string& name, std::shared_ptr<Node> node);

  // Variable, const or data node
76
  template <typename T>
77 78 79 80 81 82 83 84
  std::shared_ptr<Node> Add(const std::string& name,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    Node::Role role = Node::Role::kVar;
    if (typeid(T) == typeid(ge::op::Const)) {
      role = Node::Role::kConst;
    } else if (typeid(T) == typeid(ge::op::Data)) {
      role = Node::Role::kData;
85
    }
86 87 88 89 90
    auto node = std::make_shared<Node>(precision, layout, role);
    auto idx = Add(name, node);
    CHECK_GE(idx, 1);
    // Generate a unique name for the created HiAI IR
    node->set_data(std::make_shared<T>(name + "__" + std::to_string(idx)));
91 92 93
    return node;
  }

94 95 96 97 98 99 100 101 102 103 104 105
  // Const or data node
  std::shared_ptr<Node> Add(const std::string& name,
                            const Tensor& tensor,
                            std::vector<int64_t> shape,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW));

  std::shared_ptr<Node> Add(const std::string& name,
                            const Tensor& tensor,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, tensor, tensor.dims().Vectorize(), precision, layout);
106
  }
107

108 109 110 111 112 113
  std::shared_ptr<Node> Add(const std::string& name,
                            const Tensor& tensor,
                            DDim dims,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, tensor, dims.Vectorize(), precision, layout);
114
  }
115

116
  // Const node
117
  template <typename T>
118 119 120 121
  std::shared_ptr<Node> Add(const std::string& name,
                            const std::vector<T>& data,
                            std::vector<int64_t> shape = {},
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
122
    const std::type_info& info = typeid(T);
123
    PrecisionType precision = PRECISION(kFloat);
124
    if (info == typeid(float)) {
125
      precision = PRECISION(kFloat);
126
    } else if (info == typeid(int8_t)) {
127
      precision = PRECISION(kFloat);
128
    } else if (info == typeid(int32_t)) {
129
      precision = PRECISION(kInt32);
130 131 132 133 134 135 136 137 138 139 140 141 142 143
    } else {
      LOG(FATAL) << "[NPU] Unknow data type " << info.name();
    }
    if (shape.empty()) {
      shape = {static_cast<int64_t>(data.size())};
    } else {
      int size = 1;
      for (auto i : shape) {
        size *= i;
      }
      CHECK_EQ(data.size(), size);
    }
    Tensor tensor;
    tensor.Resize(shape);
144
    tensor.set_persistable(true);
145 146 147
    std::memcpy(reinterpret_cast<uint8_t*>(tensor.mutable_data<T>()),
                reinterpret_cast<const uint8_t*>(data.data()),
                data.size() * sizeof(T));
148
    return Add(name, tensor, precision, layout);
149 150 151
  }

  template <typename T>
152 153 154 155 156
  std::shared_ptr<Node> Add(const std::string& name,
                            const std::vector<T>& data,
                            DDim dims,
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, data, dims.Vectorize(), layout);
157 158 159
  }

  template <typename T>
160 161 162 163
  std::shared_ptr<Node> Add(const std::string& name,
                            T value,
                            std::vector<int64_t> shape = {1},
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
164 165 166 167 168
    int64_t size = 1;
    for (auto i : shape) {
      size *= i;
    }
    std::vector<T> data(size, value);
169
    return Add(name, data, shape, layout);
170 171 172
  }

  template <typename T>
173 174 175 176 177
  std::shared_ptr<Node> Add(const std::string& name,
                            T value,
                            DDim dims,
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, value, dims.Vectorize(), layout);
178 179 180
  }

  // Data node
181 182 183 184 185 186 187 188 189 190
  std::shared_ptr<Node> Add(const std::string& name,
                            std::vector<int64_t> shape,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW));

  std::shared_ptr<Node> Add(const std::string& name,
                            DDim dims,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, dims.Vectorize(), precision, layout);
191 192
  }

193 194 195
  std::shared_ptr<Node> Get(std::string name) {
    CHECK(Has(name)) << "[NPU] Node " << name << " not found.";
    return nodes_.at(name).back();
196 197
  }

198
  bool Has(const std::string& name) {
199 200 201 202
    return nodes_.find(name) != nodes_.end();
  }

 private:
203
  std::unordered_map<std::string, std::vector<std::shared_ptr<Node>>> nodes_;
204 205 206 207 208 209
};

}  // namespace npu
}  // namespace subgraph
}  // namespace lite
}  // namespace paddle