// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/api/cxx_api.h"

#include <algorithm>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "lite/api/paddle_use_passes.h"
#include "lite/utils/io.h"

namespace paddle {
namespace lite {

std::vector<std::string> GetAllOps() {
  return OpLiteFactory::Global().GetAllOps();
}

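// Save the optimized program and its parameters to `dir`. When `record_info`
// is true, the op and kernel types used by the model are also dumped so the
// library can later be tailored to this model.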
void Predictor::SaveModel(const std::string &dir,
                          lite_api::LiteModelType model_type,
                          bool record_info) {
  if (!program_) {
    GenRuntimeProgram();
  }
  program_->SaveToProgram(program_desc_);
  switch (model_type) {
    case lite_api::LiteModelType::kProtobuf:
      SaveModelPb(dir, *program_->exec_scope(), *program_desc_.get(), true);
      break;
    case lite_api::LiteModelType::kNaiveBuffer:
      SaveModelNaive(dir, *program_->exec_scope(), *program_desc_.get());
      break;
    default:
      LOG(FATAL) << "Unknown model type";
  }
  if (record_info) {
    MkDirRecur(dir);
    SaveOpKernelInfo(dir);
  }
}

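// Collect the op types and kernel signatures
// (op_type,target,precision,layout,alias) used by every block of the runtime
// program, and write them, together with their source-file names, into the
// tailoring list files under `model_dir`.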
void Predictor::SaveOpKernelInfo(const std::string &model_dir) {
  std::set<std::string> ops_info;
  std::set<std::string> kernels_info;
  auto block_size = program_->block_size();
  for (size_t block_idx = 0; block_idx < block_size; ++block_idx) {
    const auto &insts = program_->instructions(block_idx);
    for (auto &inst : insts) {
      // parse op type information
      auto op = inst.op()->op_info();
      ops_info.insert(op->Type());
      // parse kernel type information
      std::string kernel_type_str =
          inst.kernel()->op_type() + "," + TargetRepr(inst.kernel()->target()) +
          "," + PrecisionRepr(inst.kernel()->precision()) + "," +
          DataLayoutRepr(inst.kernel()->layout()) + "," +
          inst.kernel()->alias();
      kernels_info.insert(kernel_type_str);
    }
  }

  // get the source file name for each op type and kernel type
  auto op2pathmap = OpKernelInfoCollector::Global().GetOp2PathDict();
  auto kernel2pathmap = OpKernelInfoCollector::Global().GetKernel2PathDict();

  // write used op and kernel info into files
  std::string opf_path = model_dir + "/" + TAILORD_OPS_LIST_NAME;
  std::string opf_source_path =
      model_dir + "/" + TAILORD_OPS_SOURCE_LIST_FILENAME;
  std::string kpf_path = model_dir + "/" + TAILORD_KERNELS_LIST_NAME;
  std::string kpf_source_path =
      model_dir + "/" + TAILORD_KERNELS_SOURCE_LIST_FILENAME;
  std::map<std::string, std::string> op2path;

  std::FILE *opf = std::fopen(opf_path.c_str(), "w");
  std::FILE *opf_source = std::fopen(opf_source_path.c_str(), "w");
  std::FILE *kpf = std::fopen(kpf_path.c_str(), "w");
  std::FILE *kpf_source = std::fopen(kpf_source_path.c_str(), "w");
  std::vector<std::string> opcompile;
  std::vector<std::string> kernelcompile;

  if (nullptr == opf || nullptr == opf_source || nullptr == kpf ||
      nullptr == kpf_source) {
    LOG(FATAL) << "failed to create info file in: " << model_dir;
  }
  for (auto op_info = ops_info.begin(); op_info != ops_info.end(); op_info++) {
    fputs(op_info->c_str(), opf);
    fputc('\n', opf);
    std::string op_path = op2pathmap[*op_info];
    fputs(op_path.c_str(), opf_source);
    fputc('\n', opf_source);
  }
  std::fclose(opf_source);
  std::fclose(opf);
  LOG(INFO) << "operator information of the tailored model is stored in: "
            << opf_path;

  // write kernel type and kernel source path into file
  for (auto kernel_info = kernels_info.begin();
       kernel_info != kernels_info.end();
       kernel_info++) {
    fputs(kernel_info->c_str(), kpf);
    fputc('\n', kpf);
    std::string kernel_path = kernel2pathmap[*kernel_info];
    fputs(kernel_path.c_str(), kpf_source);
    fputc('\n', kpf_source);
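    // the conv kernel implementation spans several source files; list them as
    // well so they are kept when tailoring the library.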
    if (kernel_path == "conv_compute.cc") {
      fputs(
          "conv_depthwise.cc\nconv_direct.cc\nconv_gemmlike.cc\nconv_"
          "winograd.cc\n",
          kpf_source);
    }
  }
  std::fclose(kpf_source);
  std::fclose(kpf);
  LOG(INFO) << "kernel information of the tailored model is stored in: "
            << kpf_path;
}

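// Without FPGA support, inputs are looked up by name in the execution scope;
// with FPGA support, they live in the shared "feed" tensor list instead.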
#ifndef LITE_WITH_FPGA
lite::Tensor *Predictor::GetInput(size_t offset) {
  CHECK(input_names_.size() > offset)
      << "The network has " << input_names_.size() << " inputs"
      << ", the offset should be less than this.";
  auto *in_var = exec_scope_->FindVar(input_names_[offset]);
  CHECK(in_var) << "no input variable " << input_names_[offset]
                << " in exec_scope";
  return in_var->GetMutable<lite::Tensor>();
}
#else
lite::Tensor *Predictor::GetInput(size_t offset) {
  auto *_feed_list = exec_scope_->FindVar("feed");
  CHECK(_feed_list) << "no feed variable in exec_scope";
  auto *feed_list = _feed_list->GetMutable<std::vector<lite::Tensor>>();
  if (offset >= feed_list->size()) {
    feed_list->resize(offset + 1);
  }
  return &feed_list->at(offset);
}
#endif

// get input names
std::vector<std::string> Predictor::GetInputNames() { return input_names_; }

// get output names
std::vector<std::string> Predictor::GetOutputNames() { return output_names_; }

// get param names
std::vector<std::string> Predictor::GetParamNames() {
  return exec_scope_->AttributeVarNames();
}

// append the names of inputs and outputs into input_names_ and output_names_
void Predictor::PrepareFeedFetch() {
  if (!program_) {
    GenRuntimeProgram();
  }

  std::vector<const cpp::OpDesc *> feeds;
  std::vector<const cpp::OpDesc *> fetchs;
  const auto &insts = program_->instructions(kRootBlockIdx);
  for (auto &inst : insts) {
    const auto &op = inst.op()->op_info();
    if (op->Type() == "feed") {
      feeds.push_back(op);
    } else if (op->Type() == "fetch") {
      fetchs.push_back(op);
    }
  }

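  // The position of each input/output is given by the "col" attribute of the
  // corresponding feed/fetch op, so the name vectors below are indexed by
  // "col" rather than by instruction order.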
  input_names_.resize(feeds.size());
  output_names_.resize(fetchs.size());
  for (size_t i = 0; i < feeds.size(); i++) {
    input_names_[feeds[i]->GetAttr<int>("col")] =
        feeds[i]->Output("Out").front();
  }
  for (size_t i = 0; i < fetchs.size(); i++) {
    output_names_[fetchs[i]->GetAttr<int>("col")] =
        fetchs[i]->Input("X").front();
  }
}

#ifndef LITE_WITH_FPGA

const lite::Tensor *Predictor::GetOutput(size_t offset) const {
  CHECK(output_names_.size() > offset)
      << "The network has " << output_names_.size() << " outputs"
      << ", the offset should be less than this.";
  const std::string name = output_names_.at(offset);
  auto *out_var = exec_scope_->FindVar(name);
  CHECK(out_var) << "no output variable " << name << " in exec_scope";
  return out_var->GetMutable<lite::Tensor>();
}

std::vector<const lite::Tensor *> Predictor::GetOutputs() const {
  std::vector<const lite::Tensor *> outputs;
  size_t out_size = output_names_.size();
  for (size_t i = 0; i < out_size; i++) {
    const std::string name = output_names_.at(i);
    outputs.push_back(GetTensor(name));
  }
  return outputs;
}
#else

const lite::Tensor *Predictor::GetOutput(size_t offset) const {
  auto *_fetch_list = exec_scope_->FindVar("fetch");
  CHECK(_fetch_list) << "no fetch variable in exec_scope";
  auto &fetch_list = *_fetch_list->GetMutable<std::vector<lite::Tensor>>();
  CHECK_LT(offset, fetch_list.size()) << "offset " << offset << " overflow";
  return &fetch_list.at(offset);
}

std::vector<const lite::Tensor *> Predictor::GetOutputs() const {
  auto *_fetch_list = exec_scope_->FindVar("fetch");
  CHECK(_fetch_list) << "no fetch variable in exec_scope";
  auto &fetch_list = *_fetch_list->GetMutable<std::vector<lite::Tensor>>();

  std::vector<const lite::Tensor *> outputs;
  for (auto &out : fetch_list) {
    outputs.push_back(&out);
  }
  return outputs;
}

#endif

const cpp::ProgramDesc &Predictor::program_desc() const {
  return *program_desc_.get();
}
const RuntimeProgram &Predictor::runtime_program() const { return *program_; }

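// Build a predictor from a CxxConfig. A minimal usage sketch (assuming the
// defaults for `passes` and `model_type` declared in cxx_api.h, and a
// hypothetical model directory):
//
//   lite_api::CxxConfig config;
//   config.set_model_dir("./mobilenet_v1");
//   Predictor predictor;
//   predictor.Build(config, {Place{TARGET(kARM), PRECISION(kFloat)}});
//   auto *input = predictor.GetInput(0);
//   // ... fill `input`, then run and read outputs via GetOutput(0).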
void Predictor::Build(const lite_api::CxxConfig &config,
                      const std::vector<Place> &valid_places,
                      const std::vector<std::string> &passes,
                      lite_api::LiteModelType model_type) {
  const std::string &model_path = config.model_dir();
  const std::string &model_file = config.model_file();
  const std::string &param_file = config.param_file();
  const bool model_from_memory = config.model_from_memory();
  if (model_from_memory) {
    LOG(INFO) << "Load model from memory.";
  } else {
    LOG(INFO) << "Load model from file.";
  }
  Build(model_path,
        model_file,
        param_file,
        valid_places,
        passes,
        model_type,
        model_from_memory);
}
void Predictor::Build(const std::string &model_path,
                      const std::string &model_file,
                      const std::string &param_file,
                      const std::vector<Place> &valid_places,
                      const std::vector<std::string> &passes,
                      lite_api::LiteModelType model_type,
                      bool model_from_memory) {
  switch (model_type) {
    case lite_api::LiteModelType::kProtobuf: {
      bool combined_param = false;
      if (!model_file.empty() && !param_file.empty()) {
        combined_param = true;
      }
      LoadModelPb(model_path,
                  model_file,
                  param_file,
                  scope_.get(),
                  program_desc_.get(),
                  combined_param,
                  model_from_memory);
    } break;
    case lite_api::LiteModelType::kNaiveBuffer:
      CHECK(!model_path.empty())
          << "NaiveBuffer backend only supports combined param";
      LoadModelNaiveFromFile(model_path, scope_.get(), program_desc_.get());
      break;
    default:
      LOG(FATAL) << "Unknown model type";
  }
  Build(program_desc_, valid_places, passes);
}

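// Core build step: extend the valid places with their kHost counterparts
// (except for OpenCL), detect quantized models by the presence of fake
// quant/dequant ops and, if found, prefer (kARM, kInt8) kernels, then run the
// optimizer and prepare the feed/fetch bindings.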
void Predictor::Build(const std::shared_ptr<cpp::ProgramDesc> &program_desc,
                      const std::vector<Place> &valid_places,
                      const std::vector<std::string> &passes) {
  program_desc_ = program_desc;
  // `inner_places` is the set of places made available to the optimization
  // passes.
  std::vector<Place> inner_places = valid_places;
  for (auto &valid_place : valid_places) {
    if (valid_place.target == TARGET(kOpenCL)) continue;
    inner_places.emplace_back(
        Place(TARGET(kHost), valid_place.precision, valid_place.layout));
  }

  // Analyze whether the model is quantized.
  // For a quantized model, add place (arm, int8) to inner_places.
  const std::vector<std::string> quant_dequant_op = {
      "fake_quantize_abs_max",
      "fake_quantize_range_abs_max",
      "fake_quantize_moving_average_abs_max",
      "fake_quantize_dequantize_moving_average_abs_max",
      "fake_dequantize_max_abs",
      "fake_channel_wise_dequantize_max_abs"};
  bool is_quantized_model = false;
  for (size_t i = 0; i < program_desc_->BlocksSize() && !is_quantized_model;
       ++i) {
    auto *block_desc = program_desc_->GetBlock<cpp::BlockDesc>(i);
    for (size_t j = 0; j < block_desc->OpsSize() && !is_quantized_model; ++j) {
      auto *op_desc = block_desc->GetOp<cpp::OpDesc>(j);
      std::string op_type = op_desc->Type();
      if (std::find(quant_dequant_op.begin(),
                    quant_dequant_op.end(),
                    op_type) != quant_dequant_op.end()) {
        is_quantized_model = true;
      }
    }
  }
  if (is_quantized_model) {
    inner_places.insert(inner_places.begin(),
                        Place{TARGET(kARM), PRECISION(kInt8)});
  }

  Program program(program_desc_, scope_, inner_places);
  valid_places_ = inner_places;

  core::KernelPickFactor factor;
  factor.ConsiderTarget();
  factor.ConsiderPrecision();
  factor.ConsiderDataLayout();

  optimizer_.Run(std::move(program), inner_places, factor, passes);
  exec_scope_ = optimizer_.exec_scope();
  PrepareFeedFetch();
}

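// Materialize the optimized RuntimeProgram. Under CUDA, the IO/exec streams
// are bound to the program unless multi-stream execution is enabled.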
void Predictor::GenRuntimeProgram() {
  program_ = optimizer_.GenRuntimeProgram();
  CHECK_EQ(exec_scope_, program_->exec_scope());
  program_generated_ = true;
#ifdef LITE_WITH_CUDA
  if (!cuda_use_multi_stream_) {
    program_->UpdateCudaStream(cuda_exec_stream_, cuda_io_stream_);
  }
#endif
}

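// Look up an arbitrary tensor in the execution scope by variable name
// (read-only here, mutable in GetMutableTensor below).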
const lite::Tensor *Predictor::GetTensor(const std::string &name) const {
  auto *var = exec_scope_->FindVar(name);
  CHECK(var) << "no variable named " << name << " in exec_scope";
  return &var->Get<lite::Tensor>();
}

lite::Tensor *Predictor::GetMutableTensor(const std::string &name) {
  auto *var = exec_scope_->FindVar(name);
  CHECK(var) << "no variable named " << name << " in exec_scope";
  return var->GetMutable<lite::Tensor>();
}

// get input by name
lite::Tensor *Predictor::GetInputByName(const std::string &name) {
  auto element = std::find(input_names_.begin(), input_names_.end(), name);
  if (element == input_names_.end()) {
    LOG(ERROR) << "Model does not have an input named: [" << name
               << "], model's inputs include:";
    for (size_t i = 0; i < input_names_.size(); i++) {
      LOG(ERROR) << "[" << input_names_[i] << "]";
    }
    return nullptr;
  } else {
    int position = std::distance(input_names_.begin(), element);
    return GetInput(position);
  }
}

// #ifdef LITE_WITH_TRAIN
// void Predictor::FeedVars(const std::vector<framework::Tensor> &tensors) {
//   auto var = scope_->FindVar("feed");
//   auto &feed_list = *(var->GetMutable<std::vector<lite::Tensor>>());
//   feed_list.resize(tensors.size());

//   for (size_t i = 0; i < tensors.size(); ++i)
//     feed_list[i].ShareDataWith(tensors[i]);
// }
// #endif

}  // namespace lite
}  // namespace paddle