Unverified commit 111db475, authored by J juncaipeng, committed by GitHub

Modify detection test (#2000)

* add assign_value op, arm kernel and test, add fluid_type, test=develop

* add hard_sigmoid, test=develop (see the reference sketch below)

* use an image input and a new implementation to test the detection model, delete faster_rcnn_test, test=develop
Parent f25a4571
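Of the three changes listed in the commit message, only the detection-test rewrite appears in the diff below; the assign_value and hard_sigmoid kernels live in other files. For orientation, hard_sigmoid is the piecewise-linear activation y = max(0, min(1, slope * x + offset)). A minimal element-wise sketch of that computation follows; the slope = 0.2 and offset = 0.5 defaults are assumed from Paddle's operator definition, since the ARM kernel itself is not shown on this page:

#include <algorithm>
#include <cstdint>

// hard_sigmoid applied element-wise: y = max(0, min(1, slope * x + offset)).
// The slope/offset defaults are assumed from Paddle's op definition; the
// actual ARM kernel added by this commit is vectorized and not shown here.
void hard_sigmoid(const float* x, float* y, int64_t n,
                  float slope = 0.2f, float offset = 0.5f) {
  for (int64_t i = 0; i < n; ++i) {
    y[i] = std::min(1.0f, std::max(0.0f, slope * x[i] + offset));
  }
}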
@@ -16,84 +16,122 @@
 #include <gtest/gtest.h>
 #include <fstream>
 #include <vector>
-#include "lite/api/cxx_api.h"
+#include "lite/api/paddle_api.h"
 #include "lite/api/paddle_use_kernels.h"
 #include "lite/api/paddle_use_ops.h"
 #include "lite/api/paddle_use_passes.h"
 #include "lite/api/test_helper.h"
 #include "lite/core/op_registry.h"
+
+DEFINE_bool(is_run_model_optimize,
+            false,
+            "apply model_optimize_tool to model, use optimized model to test");
 namespace paddle {
-namespace lite {
-
-#ifdef LITE_WITH_ARM
-void TestModel(const std::vector<Place>& valid_places,
-               const Place& preferred_place) {
-  DeviceInfo::Init();
-  DeviceInfo::Global().SetRunMode(lite_api::LITE_POWER_HIGH, FLAGS_threads);
-  lite::Predictor predictor;
-  predictor.Build(FLAGS_model_dir, "", "", preferred_place, valid_places);
-
-  auto* input_image = predictor.GetInput(0);
-  input_image->Resize({1, 3, 1333, 800});
-  auto* input_image_data = input_image->mutable_data<float>();
-  std::ifstream read_file("/data/local/tmp/pjc/faster_rcnn_img.txt");
-  for (int i = 0; i < input_image->numel(); i++) {
-    read_file >> input_image_data[i];
-  }
-  read_file.close();
-  LOG(INFO) << "image data:" << input_image_data[0] << " "
-            << input_image_data[input_image->numel() - 1];
-
-  auto* im_info = predictor.GetInput(1);
-  im_info->Resize({1, 3});
-  auto* im_info_data = im_info->mutable_data<float>();
-  im_info_data[0] = 1333;
-  im_info_data[1] = 800;
-  im_info_data[2] = 1;
-
-  auto* im_shape = predictor.GetInput(2);
-  im_shape->Resize({1, 3});
-  auto* im_shape_data = im_shape->mutable_data<float>();
-  im_shape_data[0] = 1333;
-  im_shape_data[1] = 800;
-  im_shape_data[2] = 1;
-
-  for (int i = 0; i < FLAGS_warmup; ++i) {
-    predictor.Run();
-  }
-
-  auto start = GetCurrentUS();
-  for (int i = 0; i < FLAGS_repeats; ++i) {
-    predictor.Run();
-  }
-
-  LOG(INFO) << "================== Speed Report ===================";
-  LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads
-            << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats
-            << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0
-            << " ms in average.";
-
-  auto* out = predictor.GetOutput(0);
-  auto* out_data = out->data<float>();
-  LOG(INFO) << "==========output data===============";
-  LOG(INFO) << out->dims();
-  for (int i = 0; i < out->numel(); i++) {
-    LOG(INFO) << out_data[i];
-  }
-}
-
-TEST(Faster_RCNN, test_arm) {
-  std::vector<Place> valid_places({
-      Place{TARGET(kHost), PRECISION(kFloat)},
-      Place{TARGET(kARM), PRECISION(kFloat)},
-  });
-  TestModel(valid_places, Place({TARGET(kARM), PRECISION(kFloat)}));
-}
-#endif  // LITE_WITH_ARM
-}  // namespace lite
-}  // namespace paddle
+namespace lite_api {
+
+void OutputOptModel(const std::string& load_model_dir,
+                    const std::string& save_optimized_model_dir) {
+  lite_api::CxxConfig config;
+  config.set_model_dir(load_model_dir);
+  config.set_preferred_place(Place{TARGET(kX86), PRECISION(kFloat)});
+  config.set_valid_places({
+      Place{TARGET(kX86), PRECISION(kFloat)},
+      Place{TARGET(kARM), PRECISION(kFloat)},
+  });
+  auto predictor = lite_api::CreatePaddlePredictor(config);
+  int ret = system(
+      paddle::lite::string_format("rm -rf %s", save_optimized_model_dir.c_str())
+          .c_str());
+  if (ret == 0) {
+    LOG(INFO) << "delete old optimized model " << save_optimized_model_dir;
+  }
+  predictor->SaveOptimizedModel(save_optimized_model_dir,
+                                LiteModelType::kNaiveBuffer);
+  LOG(INFO) << "Load model from " << load_model_dir;
+  LOG(INFO) << "Save optimized model to " << save_optimized_model_dir;
+}
+
+#ifdef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
+void Run(const std::string& model_dir,
+         const int repeat,
+         const int warmup_times,
+         const int thread_num) {
+  // set config and create predictor
+  lite_api::MobileConfig config;
+  config.set_model_dir(model_dir);
+  config.set_threads(thread_num);
+  if (thread_num == 1) {
+    config.set_power_mode(LITE_POWER_HIGH);
+  } else {
+    config.set_power_mode(LITE_POWER_NO_BIND);
+  }
+  auto predictor = lite_api::CreatePaddlePredictor(config);
+
+  // set input
+  auto input_image = predictor->GetInput(0);
+  input_image->Resize({1, 3, 300, 300});
+  auto input_image_data = input_image->mutable_data<float>();
+  std::ifstream read_file("/data/local/tmp/pjc/ssd_img.txt");
+  if (!read_file.is_open()) {
+    LOG(INFO) << "read image file fail";
+    return;
+  }
+  auto input_shape = input_image->shape();
+  int64_t input_image_size = 1;
+  for (auto t : input_shape) {
+    input_image_size *= t;
+  }
+  for (int i = 0; i < input_image_size; i++) {
+    read_file >> input_image_data[i];
+  }
+
+  // warmup and run
+  for (int i = 0; i < warmup_times; ++i) {
+    predictor->Run();
+  }
+  auto start = lite::GetCurrentUS();
+  for (int i = 0; i < repeat; ++i) {
+    predictor->Run();
+  }
+
+  // show result
+  auto end = lite::GetCurrentUS();
+  LOG(INFO) << "================== Speed Report ===================";
+  LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads
+            << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats
+            << ", spend " << (end - start) / FLAGS_repeats / 1000.0
+            << " ms in average.";
+
+  auto out = predictor->GetOutput(0);
+  auto out_data = out->data<float>();
+  LOG(INFO) << "output shape:";
+  auto out_shape = out->shape();
+  for (auto t : out_shape) {
+    LOG(INFO) << t;
+  }
+  LOG(INFO) << "output data:";
+  int output_len = 20;
+  for (int i = 0; i < output_len; i++) {
+    LOG(INFO) << out_data[i];
+  }
+}
+#endif
+
+}  // namespace lite_api
+}  // namespace paddle
+
+TEST(Faster_RCNN, test_arm) {
+  std::string save_optimized_model_dir;
+  if (FLAGS_is_run_model_optimize) {
+    save_optimized_model_dir = FLAGS_model_dir + "opt";
+    paddle::lite_api::OutputOptModel(FLAGS_model_dir, save_optimized_model_dir);
+  }
+  std::string run_model_dir =
+      FLAGS_is_run_model_optimize ? save_optimized_model_dir : FLAGS_model_dir;
+  paddle::lite_api::Run(
+      run_model_dir, FLAGS_repeats, FLAGS_warmup, FLAGS_threads);
+}
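Read end to end, the new test separates offline optimization from the timed run: OutputOptModel uses a CxxConfig predictor only to write a naive-buffer copy of the model, and Run reloads that copy through MobileConfig for warmup and timing. Since GetCurrentUS returns microseconds, (end - start) / FLAGS_repeats / 1000.0 is the average latency per inference in milliseconds. Below is a condensed sketch of the two-stage flow; the wrapper name is hypothetical, while the config calls, places, and the "opt" suffix are taken from the diff above (input setup and timing omitted):

#include <string>
#include "lite/api/paddle_api.h"

// Hypothetical wrapper condensing the two stages exercised by the test above.
void OptimizeThenRun(const std::string& model_dir) {
  using namespace paddle::lite_api;  // NOLINT

  // Stage 1: the full framework loads the original model and saves an
  // optimized naive-buffer copy (what OutputOptModel does above).
  CxxConfig cxx_config;
  cxx_config.set_model_dir(model_dir);
  cxx_config.set_preferred_place(Place{TARGET(kX86), PRECISION(kFloat)});
  cxx_config.set_valid_places({Place{TARGET(kX86), PRECISION(kFloat)},
                               Place{TARGET(kARM), PRECISION(kFloat)}});
  auto cxx_predictor = CreatePaddlePredictor(cxx_config);
  cxx_predictor->SaveOptimizedModel(model_dir + "opt",
                                    LiteModelType::kNaiveBuffer);

  // Stage 2: the mobile runtime reloads the optimized model and runs it
  // (what Run does above, minus input setup and the speed report).
  MobileConfig mobile_config;
  mobile_config.set_model_dir(model_dir + "opt");
  mobile_config.set_threads(1);
  mobile_config.set_power_mode(LITE_POWER_HIGH);
  auto predictor = CreatePaddlePredictor(mobile_config);
  predictor->Run();
}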