// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/kernels/apu/subgraph_compute.h"
#include <dlfcn.h>
#include <sys/time.h>
#include <time.h>
#include <utility>
#include "lite/backends/apu/device.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/apu/bridges/graph.h"
#include "lite/kernels/apu/bridges/paddle_use_bridges.h"
#include "lite/kernels/apu/bridges/utility.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace apu {

bool SubgraphEngine::BuildDeviceProgram() {
H
hong19860320 已提交
32
  unsigned int version;
33
  Neuron_getVersion(&version);
H
hong19860320 已提交
34 35 36 37
  VLOG(3) << "Neuron Adapter version: " << version;

  int status = 0;
  subgraph::apu::Graph graph;
38
  int neuron_errCode = NeuronModel_create(&model_);
H
hong19860320 已提交
39
  if (NEURON_NO_ERROR != neuron_errCode) {
40
    LOG(WARNING) << "[APU] Failed to create the neuron model!";
41
    return false;
H
hong19860320 已提交
42 43 44 45 46 47 48
  }
  graph.set_model(model_);
  graph.set_input_names(input_names_);
  graph.set_output_names(output_names_);

  // Convert all of ops and their input vars and weights and added into the APU
  // NIR graph
49
  if (!origin_program_) {
50 51
    BuildOriginProgram();
  }
H
hong19860320 已提交
52
  const auto& bridges = subgraph::Registry::Instance();
53 54
  const auto& insts = origin_program_->instructions(kRootBlockIdx);
  for (auto& inst : insts) {
H
hong19860320 已提交
55 56 57 58 59 60
    auto op = const_cast<OpLite*>(inst.op());
    CHECK(op);
    op->CheckShape();
    op->InferShape();
    std::string op_type = op->op_info()->Type();
    if (!bridges.Exists(op_type, TARGET(kAPU))) {
61
      return false;
H
hong19860320 已提交
62 63 64 65 66 67 68 69
    }

    auto kernel = inst.kernel();
    status |=
        bridges.Select(op_type, TARGET(kAPU))(reinterpret_cast<void*>(&graph),
                                              const_cast<OpLite*>(op),
                                              const_cast<KernelBase*>(kernel));
    if (subgraph::CHECK_FAILED(status)) {
70
      return false;
H
hong19860320 已提交
71 72 73
    }
  }

74 75
  // Get the index of input tensors
  std::vector<uint32_t> input_indices;
H
hong19860320 已提交
76
  for (int i = 0; i < input_names_.size(); i++) {
77 78 79 80 81 82
    CHECK(graph.Has(input_names_[i])) << "[APU] Failed to find input node "
                                      << input_names_[i];
    auto index = graph.Get(input_names_[i])->index();
    input_indices.push_back(index);
    VLOG(3) << "[APU] Input[" << i << "] name " << input_names_[i] << " dims "
            << origin_itensors_[i]->dims() << " index " << index;
H
hong19860320 已提交
83 84
  }

85 86
  // Get the index of output tensors
  std::vector<uint32_t> output_indices;
H
hong19860320 已提交
87
  for (int i = 0; i < output_names_.size(); i++) {
88 89
    CHECK(graph.Has(output_names_[i])) << "[APU] Failed to find output node "
                                       << output_names_[i];
H
hong19860320 已提交
90
    origin_otensors_[i]->mutable_data<int8_t>();
91 92 93 94
    auto index = graph.Get(output_names_[i])->index();
    output_indices.push_back(index);
    VLOG(3) << "[APU] Output[" << i << "] name " << output_names_[i] << " dims "
            << origin_otensors_[i]->dims() << " index " << index;
H
hong19860320 已提交
95 96
  }

97 98 99 100 101 102
  // Indentify the input and output tensors of the neuron model
  NeuronModel_identifyInputsAndOutputs(model_,
                                       input_indices.size(),
                                       &input_indices[0],
                                       output_indices.size(),
                                       &output_indices[0]);
103
  neuron_errCode = NeuronModel_finish(model_);
H
hong19860320 已提交
104
  if (NEURON_NO_ERROR != neuron_errCode) {
105
    LOG(WARNING) << "[APU] Fail to create NIR model:" << neuron_errCode;
106
    return false;
H
hong19860320 已提交
107 108 109 110 111 112 113 114 115
  }
  VLOG(3) << "[APU] APU NIR model created!";

  auto GetCurrentUS = []() -> double {
    struct timeval time;
    gettimeofday(&time, NULL);
    return 1e+6 * time.tv_sec + time.tv_usec;
  };
  auto start_time = GetCurrentUS();
116
  compilation_ = lite::apu::Device::Global().Build(model_);
H
hong19860320 已提交
117 118
  if (compilation_ == nullptr) {
    LOG(WARNING) << "[APU] Build APU DLA model failed!";
119
    return false;
H
hong19860320 已提交
120 121 122
  }
  VLOG(3) << "[APU] APU DLA model created, Build cost "
          << GetCurrentUS() - start_time << " us";
123
  return true;
H
hong19860320 已提交
124 125
}

bool SubgraphEngine::LaunchDeviceProgram() {
H
hong19860320 已提交
127 128 129 130 131 132 133
  auto GetCurrentUS = []() -> double {
    struct timeval time;
    gettimeofday(&time, NULL);
    return 1e+6 * time.tv_sec + time.tv_usec;
  };

  auto start_time = GetCurrentUS();
134 135
  NeuronExecution* run = NULL;
  int neuron_errCode = NeuronExecution_create(compilation_, &run);
H
hong19860320 已提交
136 137
  if (NEURON_NO_ERROR != neuron_errCode) {
    LOG(WARNING) << "[APU] Build APU runtime failed!";
138
    return false;
H
hong19860320 已提交
139 140 141 142
  }

  // Set input buffer
  for (size_t i = 0; i < origin_itensors_.size(); i++) {
143 144
    auto origin_data = origin_itensors_[i]->mutable_data<int8_t>();
    auto converted_data = reinterpret_cast<uint8_t*>(origin_data);
H
hong19860320 已提交
145
    for (int j = 0; j < origin_itensors_[i]->data_size(); j++) {
146 147
      converted_data[j] =
          static_cast<uint8_t>(static_cast<int16_t>(origin_data[j]) + 128);
H
hong19860320 已提交
148
    }
149
    NeuronExecution_setInput(
150
        run, i, NULL, converted_data, origin_itensors_[i]->memory_size());
H
hong19860320 已提交
151 152 153 154
  }

  // Set output buffer
  for (size_t i = 0; i < origin_otensors_.size(); i++) {
155 156
    NeuronExecution_setOutput(
        run,
H
hong19860320 已提交
157 158 159 160 161 162
        i,
        NULL,
        reinterpret_cast<void*>(origin_otensors_[i]->raw_data()),
        origin_otensors_[i]->memory_size());
  }

163
  neuron_errCode = NeuronExecution_compute(run);
H
hong19860320 已提交
164 165
  if (NEURON_NO_ERROR != neuron_errCode) {
    LOG(WARNING) << "Fail to run execution!" << neuron_errCode;
166
    return false;
H
hong19860320 已提交
167 168 169
  }

  for (size_t i = 0; i < origin_otensors_.size(); i++) {
170 171
    auto converted_data = origin_otensors_[i]->mutable_data<int8_t>();
    auto origin_data = reinterpret_cast<uint8_t*>(converted_data);
H
hong19860320 已提交
172
    for (int j = 0; j < origin_otensors_[i]->data_size(); j++) {
173 174
      converted_data[j] =
          static_cast<int8_t>(static_cast<int16_t>(origin_data[j]) - 128);
H
hong19860320 已提交
175 176
    }
  }
177
  NeuronExecution_free(run);
H
hong19860320 已提交
178
  VLOG(3) << "[APU] Process cost " << GetCurrentUS() - start_time << " us";
179
  return true;
H
hong19860320 已提交
180 181
}

SubgraphEngine::~SubgraphEngine() {
  // Release the Neuron objects in reverse order of creation: the
  // compilation was built from the model, so free it first. Pointers are
  // nulled after freeing so a stale handle can never be double-freed.
  if (compilation_) {
    NeuronCompilation_free(compilation_);
    compilation_ = nullptr;
  }
  if (model_) {
    NeuronModel_free(model_);
    model_ = nullptr;
  }
}

void SubgraphCompute::PrepareForRun() {
  auto& param = this->Param<param_t>();
  engine_.reset(new SubgraphEngine(ctx_.get(),
194 195 196
                                   param.block_idx,
                                   param.program_desc,
                                   param.exec_scope,
H
hong19860320 已提交
197
                                   param.input_data_names,
198
                                   param.output_data_names));
H
hong19860320 已提交
199 200 201 202 203
  CHECK(engine_);
}

void SubgraphCompute::Run() {
  // Forward execution to the subgraph engine; PrepareForRun() must have
  // been called first to create it.
  CHECK(engine_);
  engine_->Run();
}

}  // namespace apu
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

// Register the APU subgraph kernel: it binds the subgraph op's "Inputs" and
// "Outputs" variable lists as int8 NCHW host tensors.
REGISTER_LITE_KERNEL(subgraph,
                     kAPU,
                     kInt8,
                     kNCHW,
                     paddle::lite::kernels::apu::SubgraphCompute,
                     def)
    .BindInput("Inputs",
               {LiteType::GetTensorTy(TARGET(kHost),
                                      PRECISION(kInt8),
                                      DATALAYOUT(kNCHW))})
    .BindOutput("Outputs",
                {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kInt8),
                                       DATALAYOUT(kNCHW))})
    .Finalize();