Unverified commit 111db475, authored by juncaipeng, committed by GitHub

Modify detection test (#2000)

* add assign_value op, arm kernel and test, add fluid_type, test=develop

* add hard_sigmoid, test=develop

* use image and new implementation to test detection model, delete faster_rcnn_test, test=develop
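
For context: assign_value materializes a constant tensor from values stored as attributes in the program, and hard_sigmoid is a piecewise-linear approximation of the sigmoid activation. A minimal sketch of the hard_sigmoid reference computation follows (assuming Paddle's default slope 0.2 and offset 0.5; this illustrative helper is not the ARM kernel added by this commit):

// Illustrative reference for hard_sigmoid: y = clip(slope * x + offset, 0, 1).
// Assumes Paddle's defaults slope = 0.2f, offset = 0.5f; not the real kernel.
#include <algorithm>
#include <vector>

std::vector<float> hard_sigmoid_ref(const std::vector<float>& x,
                                    float slope = 0.2f,
                                    float offset = 0.5f) {
  std::vector<float> y(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    y[i] = std::min(1.0f, std::max(0.0f, slope * x[i] + offset));
  }
  return y;
}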
Parent f25a4571
@@ -16,84 +16,122 @@
 #include <gtest/gtest.h>
 #include <fstream>
 #include <vector>
-#include "lite/api/cxx_api.h"
+#include "lite/api/paddle_api.h"
 #include "lite/api/paddle_use_kernels.h"
 #include "lite/api/paddle_use_ops.h"
 #include "lite/api/paddle_use_passes.h"
 #include "lite/api/test_helper.h"
 #include "lite/core/op_registry.h"
 
+DEFINE_bool(is_run_model_optimize,
+            false,
+            "apply model_optimize_tool to model, use optimized model to test");
+
 namespace paddle {
-namespace lite {
-
-#ifdef LITE_WITH_ARM
-void TestModel(const std::vector<Place>& valid_places,
-               const Place& preferred_place) {
-  DeviceInfo::Init();
-  DeviceInfo::Global().SetRunMode(lite_api::LITE_POWER_HIGH, FLAGS_threads);
-  lite::Predictor predictor;
-
-  predictor.Build(FLAGS_model_dir, "", "", preferred_place, valid_places);
-
-  auto* input_image = predictor.GetInput(0);
-  input_image->Resize({1, 3, 1333, 800});
-  auto* input_image_data = input_image->mutable_data<float>();
-  std::ifstream read_file("/data/local/tmp/pjc/faster_rcnn_img.txt");
-  for (int i = 0; i < input_image->numel(); i++) {
-    read_file >> input_image_data[i];
-  }
-  read_file.close();
-  LOG(INFO) << "image data:" << input_image_data[0] << " "
-            << input_image_data[input_image->numel() - 1];
-
-  auto* im_info = predictor.GetInput(1);
-  im_info->Resize({1, 3});
-  auto* im_info_data = im_info->mutable_data<float>();
-  im_info_data[0] = 1333;
-  im_info_data[1] = 800;
-  im_info_data[2] = 1;
-
-  auto* im_shape = predictor.GetInput(2);
-  im_shape->Resize({1, 3});
-  auto* im_shape_data = im_shape->mutable_data<float>();
-  im_shape_data[0] = 1333;
-  im_shape_data[1] = 800;
-  im_shape_data[2] = 1;
+namespace lite_api {
 
-  for (int i = 0; i < FLAGS_warmup; ++i) {
-    predictor.Run();
+void OutputOptModel(const std::string& load_model_dir,
+                    const std::string& save_optimized_model_dir) {
+  lite_api::CxxConfig config;
+  config.set_model_dir(load_model_dir);
+  config.set_preferred_place(Place{TARGET(kX86), PRECISION(kFloat)});
+  config.set_valid_places({
+      Place{TARGET(kX86), PRECISION(kFloat)},
+      Place{TARGET(kARM), PRECISION(kFloat)},
+  });
+  auto predictor = lite_api::CreatePaddlePredictor(config);
+
+  int ret = system(
+      paddle::lite::string_format("rm -rf %s", save_optimized_model_dir.c_str())
+          .c_str());
+  if (ret == 0) {
+    LOG(INFO) << "delete old optimized model " << save_optimized_model_dir;
   }
+  predictor->SaveOptimizedModel(save_optimized_model_dir,
+                                LiteModelType::kNaiveBuffer);
+  LOG(INFO) << "Load model from " << load_model_dir;
+  LOG(INFO) << "Save optimized model to " << save_optimized_model_dir;
+}
 
-  auto start = GetCurrentUS();
-  for (int i = 0; i < FLAGS_repeats; ++i) {
-    predictor.Run();
+#ifdef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
+void Run(const std::string& model_dir,
+         const int repeat,
+         const int warmup_times,
+         const int thread_num) {
+  // set config and create predictor
+  lite_api::MobileConfig config;
+  config.set_model_dir(model_dir);
+  config.set_threads(thread_num);
+  if (thread_num == 1) {
+    config.set_power_mode(LITE_POWER_HIGH);
+  } else {
+    config.set_power_mode(LITE_POWER_NO_BIND);
   }
 
+  auto predictor = lite_api::CreatePaddlePredictor(config);
+
+  // set input
+  auto input_image = predictor->GetInput(0);
+  input_image->Resize({1, 3, 300, 300});
+  auto input_image_data = input_image->mutable_data<float>();
+  std::ifstream read_file("/data/local/tmp/pjc/ssd_img.txt");
+  if (!read_file.is_open()) {
+    LOG(INFO) << "read image file fail";
+    return;
+  }
+  auto input_shape = input_image->shape();
+  int64_t input_image_size = 1;
+  for (auto t : input_shape) {
+    input_image_size *= t;
+  }
+  for (int i = 0; i < input_image_size; i++) {
+    read_file >> input_image_data[i];
+  }
+
+  // warmup and run
+  for (int i = 0; i < warmup_times; ++i) {
+    predictor->Run();
+  }
+  auto start = lite::GetCurrentUS();
+  for (int i = 0; i < repeat; ++i) {
+    predictor->Run();
+  }
+
+  // show result
+  auto end = lite::GetCurrentUS();
   LOG(INFO) << "================== Speed Report ===================";
   LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads
             << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats
-            << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0
+            << ", spend " << (end - start) / FLAGS_repeats / 1000.0
             << " ms in average.";
 
-  auto* out = predictor.GetOutput(0);
-  auto* out_data = out->data<float>();
-  LOG(INFO) << "==========output data===============";
-  LOG(INFO) << out->dims();
-  for (int i = 0; i < out->numel(); i++) {
+  auto out = predictor->GetOutput(0);
+  auto out_data = out->data<float>();
+  LOG(INFO) << "output shape:";
+  auto out_shape = out->shape();
+  for (auto t : out_shape) {
+    LOG(INFO) << t;
+  }
+  LOG(INFO) << "output data:";
+  int output_len = 20;
+  for (int i = 0; i < output_len; i++) {
     LOG(INFO) << out_data[i];
   }
 }
+#endif
 
-TEST(Faster_RCNN, test_arm) {
-  std::vector<Place> valid_places({
-      Place{TARGET(kHost), PRECISION(kFloat)},
-      Place{TARGET(kARM), PRECISION(kFloat)},
-  });
-
-  TestModel(valid_places, Place({TARGET(kARM), PRECISION(kFloat)}));
-}
-#endif  // LITE_WITH_ARM
-
-}  // namespace lite
+}  // namespace lite_api
 }  // namespace paddle
+
+TEST(Faster_RCNN, test_arm) {
+  std::string save_optimized_model_dir;
+  if (FLAGS_is_run_model_optimize) {
+    save_optimized_model_dir = FLAGS_model_dir + "opt";
+    paddle::lite_api::OutputOptModel(FLAGS_model_dir, save_optimized_model_dir);
+  }
+  std::string run_model_dir =
+      FLAGS_is_run_model_optimize ? save_optimized_model_dir : FLAGS_model_dir;
+  paddle::lite_api::Run(
+      run_model_dir, FLAGS_repeats, FLAGS_warmup, FLAGS_threads);
+}
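
The test above only logs the first 20 floats of output tensor 0. A hedged sketch of how those values could be decoded, assuming the model ends in Paddle's standard detection output layout of one [label, score, xmin, ymin, xmax, ymax] row per detected box (this helper is illustrative and not part of the commit):

// Decodes detection rows under the assumed 6-value-per-box layout.
#include <cstdio>

void print_detections(const float* out_data, int num_boxes) {
  for (int i = 0; i < num_boxes; ++i) {
    const float* r = out_data + i * 6;  // label, score, xmin, ymin, xmax, ymax
    std::printf("label=%d score=%.3f box=(%.1f, %.1f, %.1f, %.1f)\n",
                static_cast<int>(r[0]), r[1], r[2], r[3], r[4], r[5]);
  }
}

With the 20 values the test prints, this would cover the first three boxes.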