// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <xtcl/xtcl.h>
#include <cstring>
#include <memory>
#include <string>
#include <typeinfo>
#include <unordered_map>
#include <utility>
#include <vector>
#include "lite/core/op_lite.h"
#include "lite/core/tensor.h"

namespace paddle {
namespace lite {
namespace subgraph {
namespace xpu {

// Graph and Node are defined to collect all of the converted XTCL IR nodes.
class Node {
 public:
  enum class Role {
    kVar = 0,
    kConst,
    kData,
  };

  Node(std::shared_ptr<xtcl::xExpr> data,
       PrecisionType precision,
       DataLayoutType layout,
       Role role)
      : data_(data), precision_(precision), layout_(layout), role_(role) {}
  Node(PrecisionType precision, DataLayoutType layout, Role role)
      : precision_(precision), layout_(layout), role_(role) {}

  void set_data(std::shared_ptr<xtcl::xExpr> data) { data_ = data; }
  void set_precision(PrecisionType precision) { precision_ = precision; }
  void set_layout(DataLayoutType layout) { layout_ = layout; }
  void set_role(Role role) { role_ = role; }

  std::shared_ptr<xtcl::xExpr> data() { return data_; }
  PrecisionType precision() const { return precision_; }
  DataLayoutType layout() const { return layout_; }
  Role role() const { return role_; }
  bool is_var() const { return role_ == Role::kVar; }
  bool is_const() const { return role_ == Role::kConst; }
  bool is_data() const { return role_ == Role::kData; }

 private:
  std::shared_ptr<xtcl::xExpr> data_{nullptr};
  PrecisionType precision_{PRECISION(kFloat)};
  DataLayoutType layout_{DATALAYOUT(kNCHW)};
  Role role_{Role::kVar};
};
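
// Example (illustrative sketch, not part of the API): a Node can be created
// as a placeholder first and bound to an XTCL expression later.
//   Node n(PRECISION(kFloat), DATALAYOUT(kNCHW), Node::Role::kData);
//   CHECK(n.is_data());
//   n.set_data(expr);  // `expr` is an assumed std::shared_ptr<xtcl::xExpr>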

class Graph {
 public:
  int Add(const std::string& name, std::shared_ptr<Node> node);

  // Variable node
  std::shared_ptr<Node> Add(const std::string& name,
                            const xtcl::xExpr& layer,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW));

  // Const or data node
  std::shared_ptr<Node> Add(const std::string& name,
                            const Tensor& tensor,
                            std::vector<int64_t> shape,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW));

  std::shared_ptr<Node> Add(const std::string& name,
                            const Tensor& tensor,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, tensor, tensor.dims().Vectorize(), precision, layout);
  }

  std::shared_ptr<Node> Add(const std::string& name,
                            const Tensor& tensor,
                            DDim dims,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, tensor, dims.Vectorize(), precision, layout);
  }
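
  // Example (illustrative sketch): registering an op's weight tensor as a
  // const node; the node's shape is taken from the tensor's dims.
  //   auto w_node = graph->Add("conv2d/filter", *filter);
  //   // `graph` (Graph*) and `filter` (const Tensor*) are assumed to come
  //   // from the converter context.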

  // Const node
  template <typename T>
  std::shared_ptr<Node> Add(const std::string& name,
                            const std::vector<T>& data,
                            std::vector<int64_t> shape = {},
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    const std::type_info& info = typeid(T);
    PrecisionType precision = PRECISION(kFloat);
    if (info == typeid(float)) {
      precision = PRECISION(kFloat);
    } else if (info == typeid(int8_t)) {
      precision = PRECISION(kInt8);
    } else if (info == typeid(int32_t)) {
      precision = PRECISION(kInt32);
    } else {
      LOG(FATAL) << "[XPU] Unknown data type " << info.name();
    }
    if (shape.empty()) {
      shape = {static_cast<int64_t>(data.size())};
    } else {
      int64_t size = 1;
      for (auto i : shape) {
        size *= i;
      }
      CHECK_EQ(static_cast<int64_t>(data.size()), size);
    }
    Tensor tensor;
    tensor.Resize(shape);
    tensor.set_persistable(true);
    std::memcpy(reinterpret_cast<uint8_t*>(tensor.mutable_data<T>()),
                reinterpret_cast<const uint8_t*>(data.data()),
                data.size() * sizeof(T));
    return Add(name, tensor, precision, layout);
  }
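
  // Example (illustrative sketch): adding a small int32 const node from host
  // data; precision is deduced from T and the shape defaults to {data.size()}.
  //   std::vector<int32_t> axes{0, 2, 3};
  //   auto axes_node = graph->Add("reduce/axes", axes);  // shape {3}, kInt32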

  template <typename T>
  std::shared_ptr<Node> Add(const std::string& name,
                            const std::vector<T>& data,
                            DDim dims,
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, data, dims.Vectorize(), layout);
  }

  template <typename T>
  std::shared_ptr<Node> Add(const std::string& name,
                            T value,
                            std::vector<int64_t> shape = {1},
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    int64_t size = 1;
    for (auto i : shape) {
      size *= i;
    }
    std::vector<T> data(size, value);
    return Add(name, data, shape, layout);
  }

  template <typename T>
  std::shared_ptr<Node> Add(const std::string& name,
                            T value,
                            DDim dims,
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, value, dims.Vectorize(), layout);
  }
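
  // Example (illustrative sketch): broadcasting a scalar into a one-element
  // float const node, e.g. for a fused scale factor (`graph` is assumed).
  //   auto scale_node = graph->Add("fc/scale", 0.5f);  // shape {1}, kFloat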

  // Data node
  std::shared_ptr<Node> Add(const std::string& name,
                            std::vector<int64_t> shape,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW));

  std::shared_ptr<Node> Add(const std::string& name,
                            DDim dims,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, dims.Vectorize(), precision, layout);
  }
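
  // Example (illustrative sketch): declaring a subgraph input as a data node
  // carrying only shape/precision metadata (`x` is an assumed input Tensor).
  //   auto x_node = graph->Add("x", x.dims());  // placeholder, no host data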

  std::shared_ptr<Node> Get(const std::string& name) {
    CHECK(Has(name)) << "[XPU] Node " << name << " not found.";
    return nodes_.at(name).back();
  }

  bool Has(const std::string& name) {
    return nodes_.find(name) != nodes_.end();
  }
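
  // Example (illustrative sketch): the typical lookup-or-create pattern used
  // by op converters (`graph`, `scope`, and `x_name` are assumed).
  //   std::shared_ptr<Node> x_node;
  //   if (graph->Has(x_name)) {
  //     x_node = graph->Get(x_name);
  //   } else {
  //     x_node = graph->Add(x_name, *scope->FindTensor(x_name));
  //   }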

 public:
  // XPU network builder and constant tensors
  xtcl::network::xNetworkBuilder builder_;
  xtcl::network::xTensorCompiler::ParamNDArrayMap params_;

 private:
  std::unordered_map<std::string, std::vector<std::shared_ptr<Node>>> nodes_;
};

}  // namespace xpu
}  // namespace subgraph
}  // namespace lite
}  // namespace paddle