terminating with uncaught exception of type std::length_error: basic_string
Created by: banbishan
在安卓 9 手机上运行编译好的 C++ 可执行文件。optimize_out 文件夹里是用 opt 工具优化过的 __model__.nb 和 param.nb 文件。执行 ./model --model_dir="optimize_out" --optimized_model_dir="op" 时报错如下:
terminating with uncaught exception of type std::length_error: basic_string
Aborted
程序如下:
#include <stdio.h>
#include <time.h>

#include <chrono>
#include <iostream>
#include <vector>

#include <gflags/gflags.h>

#include "paddle_api.h"          // NOLINT
#include "paddle_use_kernels.h"  // NOLINT
#include "paddle_use_ops.h"      // NOLINT
#include "paddle_use_passes.h"   // NOLINT
using namespace paddle::lite_api; // NOLINT
using namespace std;
DEFINE_string(model_dir, "", "Model dir path.");
DEFINE_string(optimized_model_dir, "", "Optimized model dir.");
DEFINE_bool(prefer_int8_kernel, false, "Prefer to run model with int8 kernels");
// Returns the total element count of a tensor shape, i.e. the product of
// all dimension extents (1 for an empty shape).
int64_t ShapeProduction(const shape_t& shape) {
  int64_t product = 1;
  for (size_t axis = 0; axis < shape.size(); ++axis) {
    product *= shape[axis];
  }
  return product;
}
void RunModel() {
// 1. Set CxxConfig
CxxConfig config;
config.set_model_dir(FLAGS_model_dir);
std::vector<Place> valid_places{Place{TARGET(kARM), PRECISION(kFloat)}};
if (FLAGS_prefer_int8_kernel) {
valid_places.push_back(Place{TARGET(kARM), PRECISION(kInt8)});
config.set_preferred_place(Place{TARGET(kARM), PRECISION(kInt8)});
} else {
config.set_preferred_place(Place{TARGET(kARM), PRECISION(kFloat)});
}
config.set_valid_places(valid_places);
// 2. Create PaddlePredictor by CxxConfig
std::shared_ptr<PaddlePredictor> predictor =
CreatePaddlePredictor<CxxConfig>(config);
// 3. Prepare input data
std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
input_tensor->Resize(shape_t({1,1,48,512}));
auto* data = input_tensor->mutable_data<float>();
for(int i = 0; i < ShapeProduction(input_tensor->shape()); ++i){
data[i] = 1;
}
std::unique_ptr<Tensor> init_scores(std::move(predictor->GetInput(2)));
init_scores->Resize(shape_t({1,1}));
auto* data_scores = init_scores->mutable_data<float>();
for(int i = 0; i < ShapeProduction(init_scores->shape()); ++i){
data_scores[i] = 0;
}
lod_t lod_s{{0,1},{0,1}};
init_scores->SetLoD(lod_s);
std::unique_ptr<Tensor> init_ids(std::move(predictor->GetInput(1)));
init_ids->Resize(shape_t({1,1}));
auto* data_ids = init_ids->mutable_data<float>();
for(int i = 0; i < ShapeProduction(init_ids->shape()); ++i){
data_ids[i] = 0;
}
lod_t lod_i{{0,1},{0,1}};
init_ids->SetLoD(lod_i);
// 4. Run predictor
predictor->Run();
clock_t start = clock();
for (int i = 0; i <1000; ++i){
predictor->Run();
}
clock_t end = clock();
std::cout<<(double)(end-start)/CLOCKS_PER_SEC<<std::endl;
// 6. Save optimition model
predictor->SaveOptimizedModel(FLAGS_optimized_model_dir,
LiteModelType::kNaiveBuffer);
}
// Entry point: parses --model_dir / --optimized_model_dir /
// --prefer_int8_kernel (the final `true` removes parsed flags from argv),
// then runs the benchmark.
int main(int argc, char** argv) {
google::ParseCommandLineFlags(&argc, &argv, true);
RunModel();
return 0;
}