// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/kernels/apu/subgraph_compute.h"
#include <dlfcn.h>
#include <sys/time.h>
#include <time.h>
#include <utility>
#include "lite/backends/apu/device.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/apu/bridges/graph.h"
#include "lite/kernels/apu/bridges/paddle_use_bridges.h"
#include "lite/kernels/apu/bridges/utility.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace apu {

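// Converts the subgraph into a Neuron (NIR) model, identifies its input and
// output operands, and compiles it into a device-dependent APU DLA model.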
bool SubgraphEngine::BuildDeviceProgram() {
  if (!origin_program_) {
    BuildOriginProgram();
  }

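  // Query the Neuron Adapter version for diagnostics.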
  unsigned int version;
  Neuron_getVersion(&version);
  VLOG(3) << "Neuron Adapter version: " << version;

  int status = 0;
  subgraph::apu::Graph graph;
  int neuron_errCode = NeuronModel_create(&model_);
  if (NEURON_NO_ERROR != neuron_errCode) {
    LOG(WARNING) << "[APU] Failed to create the neuron model!";
    return false;
  }
  graph.set_model(model_);
  graph.set_input_names(input_names_);
  graph.set_output_names(output_names_);

  // Convert all of the ops with their input vars and weights, and add them
  // into the APU NIR graph
  const auto& bridges = subgraph::Registry::Instance();
  const auto& insts = origin_program_->instructions(kRootBlockIdx);
  for (auto& inst : insts) {
    auto op = const_cast<OpLite*>(inst.op());
    CHECK(op);
    op->CheckShape();
    op->InferShape();
    std::string op_type = op->op_info()->Type();
    if (!bridges.Exists(op_type, TARGET(kAPU))) {
      return false;
    }

    auto kernel = inst.kernel();
    status |=
        bridges.Select(op_type, TARGET(kAPU))(reinterpret_cast<void*>(&graph),
                                              const_cast<OpLite*>(op),
                                              const_cast<KernelBase*>(kernel));
    if (subgraph::CHECK_FAILED(status)) {
      return false;
    }
  }

  // Get the indices of the input tensors
  std::vector<uint32_t> input_indices;
  for (size_t i = 0; i < input_names_.size(); i++) {
    CHECK(graph.Has(input_names_[i])) << "[APU] Failed to find input node "
                                      << input_names_[i];
    auto index = graph.Get(input_names_[i])->index();
    input_indices.push_back(index);
    VLOG(3) << "[APU] Input[" << i << "] name " << input_names_[i] << " dims "
            << origin_itensors_[i]->dims() << " index " << index;
  }

  // Get the indices of the output tensors
  std::vector<uint32_t> output_indices;
  for (size_t i = 0; i < output_names_.size(); i++) {
    CHECK(graph.Has(output_names_[i])) << "[APU] Failed to find output node "
                                       << output_names_[i];
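    // Pre-allocate the origin output tensor's int8 buffer; the device fills
    // it at execution time.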
    origin_otensors_[i]->mutable_data<int8_t>();
    auto index = graph.Get(output_names_[i])->index();
    output_indices.push_back(index);
    VLOG(3) << "[APU] Output[" << i << "] name " << output_names_[i] << " dims "
            << origin_otensors_[i]->dims() << " index " << index;
  }

  // Identify the input and output tensors of the Neuron model
  NeuronModel_identifyInputsAndOutputs(model_,
                                       input_indices.size(),
                                       &input_indices[0],
                                       output_indices.size(),
                                       &output_indices[0]);
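  // Finish the model definition; no more operands or operations can be added
  // to the model after this point.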
  neuron_errCode = NeuronModel_finish(model_);
  if (NEURON_NO_ERROR != neuron_errCode) {
    LOG(WARNING) << "[APU] Fail to create NIR model:" << neuron_errCode;
    return false;
  }
  VLOG(3) << "[APU] APU NIR model created!";

  auto GetCurrentUS = []() -> double {
    struct timeval time;
    gettimeofday(&time, NULL);
    return 1e+6 * time.tv_sec + time.tv_usec;
  };
  auto start_time = GetCurrentUS();
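  // Compile the NIR model into a DLA model for the APU.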
  compilation_ = lite::apu::Device::Global().Build(model_);
  if (compilation_ == nullptr) {
    LOG(WARNING) << "[APU] Build APU DLA model failed!";
    return false;
  }
  VLOG(3) << "[APU] APU DLA model created, Build cost "
          << GetCurrentUS() - start_time << " us";
  return true;
}

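// Runs one inference on the APU: binds the input/output buffers, launches the
// compiled model, and converts the quantized data between int8 and uint8.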
bool SubgraphEngine::LaunchDeviceProgram() {
  auto GetCurrentUS = []() -> double {
    struct timeval time;
    gettimeofday(&time, NULL);
    return 1e+6 * time.tv_sec + time.tv_usec;
  };

  auto start_time = GetCurrentUS();
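  // Create a one-shot execution instance from the compiled model; it is
  // freed at the end of this launch.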
  NeuronExecution* run = NULL;
  int neuron_errCode = NeuronExecution_create(compilation_, &run);
  if (NEURON_NO_ERROR != neuron_errCode) {
    LOG(WARNING) << "[APU] Build APU runtime failed!";
    return false;
  }

  // Set input buffer
  for (size_t i = 0; i < origin_itensors_.size(); i++) {
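    // The Neuron model consumes asymmetric uint8 data, so convert the int8
    // input values in place by shifting them up by 128.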
    auto origin_data = origin_itensors_[i]->mutable_data<int8_t>();
    auto converted_data = reinterpret_cast<uint8_t*>(origin_data);
    for (size_t j = 0; j < origin_itensors_[i]->data_size(); j++) {
      converted_data[j] =
          static_cast<uint8_t>(static_cast<int16_t>(origin_data[j]) + 128);
    }
    NeuronExecution_setInput(
        run, i, NULL, converted_data, origin_itensors_[i]->memory_size());
  }

  // Set output buffer
  for (size_t i = 0; i < origin_otensors_.size(); i++) {
    NeuronExecution_setOutput(
        run,
        i,
        NULL,
        reinterpret_cast<void*>(origin_otensors_[i]->raw_data()),
        origin_otensors_[i]->memory_size());
  }

  neuron_errCode = NeuronExecution_compute(run);
  if (NEURON_NO_ERROR != neuron_errCode) {
    LOG(WARNING) << "Fail to run execution!" << neuron_errCode;
    return false;
  }

  for (size_t i = 0; i < origin_otensors_.size(); i++) {
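    // Convert the uint8 results written by the device back to int8 in place
    // by shifting them down by 128.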
    auto converted_data = origin_otensors_[i]->mutable_data<int8_t>();
    auto origin_data = reinterpret_cast<uint8_t*>(converted_data);
    for (size_t j = 0; j < origin_otensors_[i]->data_size(); j++) {
      converted_data[j] =
          static_cast<int8_t>(static_cast<int16_t>(origin_data[j]) - 128);
    }
  }
  NeuronExecution_free(run);
  VLOG(3) << "[APU] Process cost " << GetCurrentUS() - start_time << " us";
  return true;
}

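// Releases the Neuron compilation and model owned by this engine.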
SubgraphEngine::~SubgraphEngine() {
  if (compilation_) {
    NeuronCompilation_free(compilation_);
  }
  if (model_) {
    NeuronModel_free(model_);
  }
}

void SubgraphCompute::PrepareForRun() {
  auto& param = this->Param<param_t>();
  engine_.reset(new SubgraphEngine(ctx_.get(),
                                   param.block_idx,
                                   param.program_desc,
                                   param.exec_scope,
                                   param.input_data_names,
                                   param.output_data_names));
  CHECK(engine_);
}

void SubgraphCompute::Run() {
  CHECK(engine_);
  engine_->Run();
}

}  // namespace apu
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

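// Register the APU subgraph kernel: its inputs and outputs are int8 NCHW
// tensors resident on the host.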
REGISTER_LITE_KERNEL(subgraph,
                     kAPU,
                     kInt8,
                     kNCHW,
                     paddle::lite::kernels::apu::SubgraphCompute,
                     def)
    .BindInput("Inputs",
               {LiteType::GetTensorTy(TARGET(kHost),
                                      PRECISION(kInt8),
                                      DATALAYOUT(kNCHW))})
    .BindOutput("Outputs",
                {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kInt8),
                                       DATALAYOUT(kNCHW))})
    .Finalize();