// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/backends/npu/builder.h"
#include <algorithm>
#include <cstring>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "lite/backends/npu/runtime.h"

namespace paddle {
namespace lite {
namespace npu {

// Build the HiAI IR graph into an OM model, and store the OM model data in
// the given Lite tensor
bool BuildModel(std::vector<ge::Operator>& inputs,   // NOLINT
                std::vector<ge::Operator>& outputs,  // NOLINT
                lite::Tensor* model_data) {
  LOG(INFO) << "[NPU] Build model.";
  CHECK_GT(inputs.size(), 0);
  CHECK_GT(outputs.size(), 0);
  CHECK(model_data != nullptr);
  // build IR graph to om model
  ge::Graph ir_graph("graph");
  ir_graph.SetInputs(inputs).SetOutputs(outputs);
  ge::Model om_model("model", "model");
  om_model.SetGraph(ir_graph);
  domi::HiaiIrBuild ir_build;
  domi::ModelBufferData om_model_buf;
  if (!ir_build.CreateModelBuff(om_model, om_model_buf)) {
    LOG(WARNING) << "[NPU] CreateModelBuff failed!";
    return false;
  }
  if (!ir_build.BuildIRModel(om_model, om_model_buf)) {
    LOG(WARNING) << "[NPU] BuildIRModel failed!";
    return false;
  }
  // store om model into tensor
  model_data->Resize({om_model_buf.length});
  memcpy(model_data->mutable_data<int8_t>(),
         om_model_buf.data,
         om_model_buf.length);
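  // free the model buffer allocated by CreateModelBuff above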
  ir_build.ReleaseModelBuff(om_model_buf);
  return true;
}

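// Generate a unique name by appending a per-prefix counter to the given
// prefix, e.g. UniqueName("conv") returns "conv_1", then "conv_2", and so on.
// The counter map is guarded by a mutex, so this is safe to call concurrently.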
std::string UniqueName(const std::string& prefix) {
  static std::mutex counter_mtx;
  static std::unordered_map<std::string, int> counter_map;
  std::unique_lock<std::mutex> counter_lck(counter_mtx);
  int counter = 1;
  auto it = counter_map.find(prefix);
  if (it == counter_map.end()) {
    counter_map[prefix] = counter;
  } else {
    counter = ++(it->second);
  }
  return prefix + "_" + std::to_string(counter);
}

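// Convert a Lite precision type to the corresponding HiAI ge::DataType,
// aborting on unsupported types.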
ge::DataType CvtPrecisionType(PrecisionType itype) {
  ge::DataType otype = ge::DT_FLOAT;
  switch (itype) {
    case PRECISION(kFloat):
      otype = ge::DT_FLOAT;
      break;
    case PRECISION(kInt8):
      otype = ge::DT_INT8;
      break;
    case PRECISION(kInt32):
      otype = ge::DT_INT32;
      break;
    default:
      LOG(FATAL) << "[NPU] Can not convert precision type("
                 << PrecisionToStr(itype) << ") from Lite to NPU";
      break;
  }
  return otype;
}

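// Convert a Lite data layout type to the corresponding HiAI ge::Format.
// Only NCHW is supported so far; any other layout aborts.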
ge::Format CvtDataLayoutType(DataLayoutType itype) {
  ge::Format otype = ge::FORMAT_NCHW;
  switch (itype) {
    case DATALAYOUT(kNCHW):
      otype = ge::FORMAT_NCHW;
      break;
    // TODO(hong19860320) support more data layout type
    default:
      LOG(FATAL) << "[NPU] Can not convert data layout type("
                 << DataLayoutToStr(itype) << ") from Lite to NPU";
      break;
  }
  return otype;
}

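// Wrap the data of a Lite tensor into a HiAI ge::Tensor with the given
// shape, precision and data layout. An empty out_shape means "keep the
// input tensor's shape".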
ge::TensorPtr CvtTensor(lite::Tensor* in_tensor,
                        std::vector<int64_t> out_shape,
                        PrecisionType in_ptype,
                        DataLayoutType in_ltype) {
  uint8_t* in_data = nullptr;
  auto in_size = in_tensor->dims().production();
  auto in_shape = in_tensor->dims().Vectorize();
  if (out_shape.empty()) {
    out_shape = in_shape;
  }
  int in_bytes;
  if (in_ptype == PRECISION(kFloat)) {
    in_data = reinterpret_cast<uint8_t*>(in_tensor->mutable_data<float>());
    in_bytes = in_size * sizeof(float);
  } else if (in_ptype == PRECISION(kInt32)) {
    in_data = reinterpret_cast<uint8_t*>(in_tensor->mutable_data<int32_t>());
    in_bytes = in_size * sizeof(int32_t);
  } else if (in_ptype == PRECISION(kInt8)) {
    in_data = reinterpret_cast<uint8_t*>(in_tensor->mutable_data<int8_t>());
    in_bytes = in_size * sizeof(int8_t);
  } else {
    LOG(FATAL) << "[NPU] Unknow precision type " << PrecisionToStr(in_ptype);
  }
  ge::DataType out_ptype = CvtPrecisionType(in_ptype);
  ge::Format out_ltype = CvtDataLayoutType(in_ltype);

  ge::TensorDesc out_desc(ge::Shape(out_shape), out_ltype, out_ptype);
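  // only the NCHW layout is accepted for HiAI tensors at the moment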
  CHECK_EQ(out_ltype, ge::FORMAT_NCHW);

  auto out_size = out_desc.GetShape().GetShapeSize();
  CHECK_EQ(out_size, in_size);

  ge::TensorPtr out_tensor = std::make_shared<ge::Tensor>();
  out_tensor->SetTensorDesc(out_desc);
  out_tensor->SetData(in_data, in_bytes);
  return out_tensor;
}

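// Map an activation type string to the mode value of the HiAI Activation
// operator. Note that mode 7 is not mapped here.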
int CvtActMode(std::string act_type) {
  int act_mode = 1;
  if (act_type == "sigmoid") {
    act_mode = 0;
  } else if (act_type == "relu") {
    act_mode = 1;
  } else if (act_type == "tanh") {
    act_mode = 2;
  } else if (act_type == "relu_clipped") {
    act_mode = 3;
  } else if (act_type == "elu") {
    act_mode = 4;
  } else if (act_type == "leaky_relu") {
    act_mode = 5;
  } else if (act_type == "abs") {
    act_mode = 6;
  } else if (act_type == "softsign") {
    act_mode = 8;
  } else if (act_type == "softplus") {
    act_mode = 9;
  } else if (act_type == "hard_sigmoid") {
    act_mode = 10;
  } else {
    // TODO(hong19860320) support more activation mode
    LOG(FATAL) << "[NPU] Unsupported activation type " << act_type;
  }
  return act_mode;
}

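// Return true if op_info declares the input argument `argname`, the argument
// is bound to at least one variable, and that variable exists in the scope.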
bool HasInputArg(const OpInfo* op_info,
                 const Scope* scope,
                 const std::string& argname) {
  auto iarg_names = op_info->input_argnames();
  if (std::find(iarg_names.begin(), iarg_names.end(), argname) !=
      iarg_names.end()) {
    auto inputs = op_info->Input(argname);
    if (inputs.empty()) {
      return false;
    }
    auto var_name = inputs.front();
    auto var = scope->FindVar(var_name);
    return var != nullptr;
  } else {
    return false;
  }
}

}  // namespace npu
}  // namespace lite
}  // namespace paddle