Commit 5fd68ac1 authored by lidanqing, committed by Tao Luo

some fixes for int8 mobilenet_ssd tester (#18112)

* some fixes for int8 mobilenet_ssd tester
test=develop

* change wrong data file name
test=develop

* change test images bin file from 200 images to 100 images

* check file existence instead of directory existence when downloading
test=develop

* reuse download_data
test=develop

* run full dataset when iterations=0
test=develop
Parent c2efdfd5
...
@@ -5,22 +5,20 @@ if(WITH_GPU AND TENSORRT_FOUND)
 endif()

 function(download_data install_dir data_file)
-  if (NOT EXISTS ${install_dir})
+  if (NOT EXISTS ${install_dir}/${data_file})
     inference_download_and_uncompress(${install_dir} ${INFERENCE_URL} ${data_file})
   endif()
 endfunction()

 function(download_int8_data install_dir data_file)
-  if (NOT EXISTS ${install_dir})
+  if (NOT EXISTS ${install_dir}/${data_file})
     inference_download_and_uncompress(${install_dir} ${INFERENCE_URL}/int8 ${data_file})
   endif()
 endfunction()

 function(download_model_and_data install_dir model_name data_name)
-  if (NOT EXISTS ${install_dir})
-    inference_download_and_uncompress(${install_dir} ${INFERENCE_URL} ${model_name})
-    inference_download_and_uncompress(${install_dir} ${INFERENCE_URL} ${data_name})
-  endif()
+  download_data(${install_dir} ${model_name})
+  download_data(${install_dir} ${data_name})
 endfunction()

 function(inference_analysis_api_test target install_dir filename)
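With the guard keyed to ${install_dir}/${data_file} instead of the bare directory, an install directory left behind by an earlier, partial download no longer suppresses the fetch: the archive is downloaded again until the expected file is actually present. download_model_and_data then shrinks to two download_data calls rather than repeating the check itself.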
...
@@ -234,12 +232,12 @@ if(WITH_MKLDNN)
     inference_analysis_api_int8_test_run(test_analyzer_int8_googlenet ${INT8_IMG_CLASS_TEST_APP} ${INT8_GOOGLENET_MODEL_DIR} ${IMAGENET_DATA_PATH})

     ### Object detection models
-    set(PASCALVOC_DATA_PATH "${INT8_DATA_DIR}/pascalvoc_data.bin")
+    set(PASCALVOC_DATA_PATH "${INT8_DATA_DIR}/pascalvoc_val_head_100.bin")
     set(INT8_OBJ_DETECT_TEST_APP "test_analyzer_int8_object_detection")
     set(INT8_OBJ_DETECT_TEST_APP_SRC "analyzer_int8_object_detection_tester.cc")

     # download dataset if necessary
-    download_int8_data(${INT8_DATA_DIR} "pascalvoc_val_100_head.tar.gz")
+    download_int8_data(${INT8_DATA_DIR} "pascalvoc_val_head_100.tar.gz")

     # build test binary to be used in subsequent tests
     inference_analysis_api_int8_test_build(${INT8_OBJ_DETECT_TEST_APP} ${INT8_OBJ_DETECT_TEST_APP_SRC})
...
@@ -70,9 +70,9 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
     const std::vector<std::vector<PaddleTensor>> &test_data,
     int num_images = FLAGS_warmup_batch_size) {
   int test_data_batch_size = test_data[0][0].shape[0];
-  auto iterations_max = test_data.size();
+  auto iterations = test_data.size();
   PADDLE_ENFORCE(
-      static_cast<size_t>(num_images) <= iterations_max * test_data_batch_size,
+      static_cast<size_t>(num_images) <= iterations * test_data_batch_size,
       "The requested quantization warmup data size " +
           std::to_string(num_images) + " is bigger than all test data size.");
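The enforce above encodes a simple invariant: the warmup set must fit inside the test data, which spans at most test_data.size() batches of test_data_batch_size images each. A minimal standalone sketch of that check (the helper name is illustrative, not part of the tester):

#include <cassert>
#include <cstddef>

// A warmup set of num_images fits when the test data, num_batches
// batches holding batch_size images apiece, can supply it.
bool WarmupFits(std::size_t num_images, std::size_t num_batches,
                std::size_t batch_size) {
  return num_images <= num_batches * batch_size;
}

int main() {
  assert(WarmupFits(100, 10, 50));   // 100 warmup images, 500 available
  assert(!WarmupFits(600, 10, 50));  // more warmup images than test data
  return 0;
}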
...
@@ -130,7 +130,11 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs,
                                         label_batch_shape, "label");
   auto iterations_max = total_images / batch_size;
-  for (auto i = 0; i < iterations_max; i++) {
+  auto iterations = iterations_max;
+  if (FLAGS_iterations > 0 && FLAGS_iterations < iterations_max) {
+    iterations = FLAGS_iterations;
+  }
+  for (auto i = 0; i < iterations; i++) {
     auto images = image_reader.NextBatch();
     auto labels = label_reader.NextBatch();
     inputs->emplace_back(
...
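The same capping rule is applied again in the object detection tester below: FLAGS_iterations == 0 (the default) now means "run the full dataset", while a positive value caps the loop without ever exceeding what the data can supply. A minimal sketch of that rule, using illustrative names rather than the tester's own:

#include <cstdint>
#include <iostream>

// 0 (the default) selects the full dataset; a positive flag value caps
// the iteration count but never raises it above what the data provides.
int64_t EffectiveIterations(int64_t flag_iterations, int64_t total_images,
                            int64_t batch_size) {
  const int64_t iterations_max = total_images / batch_size;
  int64_t iterations = iterations_max;
  if (flag_iterations > 0 && flag_iterations < iterations_max) {
    iterations = flag_iterations;
  }
  return iterations;
}

int main() {
  std::cout << EffectiveIterations(0, 100, 10) << "\n";   // 10: full dataset
  std::cout << EffectiveIterations(3, 100, 10) << "\n";   // 3: capped by flag
  std::cout << EffectiveIterations(50, 100, 10) << "\n";  // 10: flag exceeds data
  return 0;
}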
@@ -79,7 +79,7 @@ class TensorReader {
 };

 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs,
-              int32_t batch_size = FLAGS_batch_size, int process_images = 0) {
+              int32_t batch_size = FLAGS_batch_size) {
   std::ifstream file(FLAGS_infer_data, std::ios::binary);
   if (!file) {
     FAIL() << "Couldn't open file: " << FLAGS_infer_data;
...
@@ -110,9 +110,12 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs,
   TensorReader<float> bbox_reader(file, bbox_beginning_offset, "gt_bbox");
   TensorReader<int64_t> difficult_reader(file, difficult_beginning_offset,
                                          "gt_difficult");
-  if (process_images == 0) process_images = total_images;
-  auto iterations_max = process_images / batch_size;
-  for (auto i = 0; i < iterations_max; i++) {
+  auto iterations_max = total_images / batch_size;
+  auto iterations = iterations_max;
+  if (FLAGS_iterations > 0 && FLAGS_iterations < iterations_max) {
+    iterations = FLAGS_iterations;
+  }
+  for (auto i = 0; i < iterations; i++) {
     auto images_tensor = image_reader.NextBatch({batch_size, 3, 300, 300}, {});
     std::vector<size_t> batch_lod(lod_full.begin() + i * batch_size,
                                   lod_full.begin() + batch_size * (i + 1));
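Both the lod_full slicing above and the test_data[batches][1].lod[0][batch_remain] lookup in the warmup hunk below lean on Paddle's LoD (level-of-detail) convention: variable-length per-image annotations are flattened into one tensor, and lod[0] holds cumulative offsets, so lod[0][k] is the total object count of the first k images. A small sketch of that offset scheme (values are illustrative):

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // Objects detected per image in a 4-image batch (variable length).
  std::vector<std::size_t> objects_per_image = {2, 0, 3, 1};

  // Cumulative offsets: lod[k] == number of objects in the first k images.
  std::vector<std::size_t> lod = {0};
  for (std::size_t n : objects_per_image) lod.push_back(lod.back() + n);

  // lod == {0, 2, 2, 5, 6}: the first 3 images hold lod[3] == 5 objects,
  // so a flat gt_label tensor for those images spans elements [0, 5).
  std::cout << "objects in first 3 images: " << lod[3] << "\n";
  return 0;
}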
...
@@ -139,9 +142,9 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
     const std::vector<std::vector<PaddleTensor>> &test_data,
     int32_t num_images = FLAGS_warmup_batch_size) {
   int test_data_batch_size = test_data[0][0].shape[0];
-  auto iterations_max = test_data.size();
+  auto iterations = test_data.size();
   PADDLE_ENFORCE(
-      static_cast<int32_t>(num_images) <= iterations_max * test_data_batch_size,
+      static_cast<int32_t>(num_images) <= iterations * test_data_batch_size,
       "The requested quantization warmup data size " +
           std::to_string(num_images) + " is bigger than all test data size.");
...
@@ -214,23 +217,23 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
                 static_cast<int64_t *>(difficult.data.data()) + objects_accum);
     objects_accum = objects_accum + objects_in_batch;
   }
-  size_t objects_remain = test_data[batches][1].lod[0][batch_remain];
-  std::copy_n(
-      static_cast<float *>(test_data[batches][0].data.data()),
-      batch_remain * 3 * 300 * 300,
-      static_cast<float *>(images.data.data()) + objects_accum * 3 * 300 * 300);
-  std::copy_n(static_cast<int64_t *>(test_data[batches][1].data.data()),
-              objects_remain,
-              static_cast<int64_t *>(labels.data.data()) + objects_accum);
-  std::copy_n(static_cast<float *>(test_data[batches][2].data.data()),
-              objects_remain * 4,
-              static_cast<float *>(bbox.data.data()) + objects_accum * 4);
-  std::copy_n(static_cast<int64_t *>(test_data[batches][3].data.data()),
-              objects_remain,
-              static_cast<int64_t *>(difficult.data.data()) + objects_accum);
-
-  objects_accum = objects_accum + objects_remain;
+  if (batch_remain > 0) {
+    size_t objects_remain = test_data[batches][1].lod[0][batch_remain];
+    std::copy_n(static_cast<float *>(test_data[batches][0].data.data()),
+                batch_remain * 3 * 300 * 300,
+                static_cast<float *>(images.data.data()) +
+                    objects_accum * 3 * 300 * 300);
+    std::copy_n(static_cast<int64_t *>(test_data[batches][1].data.data()),
+                objects_remain,
+                static_cast<int64_t *>(labels.data.data()) + objects_accum);
+    std::copy_n(static_cast<float *>(test_data[batches][2].data.data()),
+                objects_remain * 4,
+                static_cast<float *>(bbox.data.data()) + objects_accum * 4);
+    std::copy_n(static_cast<int64_t *>(test_data[batches][3].data.data()),
+                objects_remain,
+                static_cast<int64_t *>(difficult.data.data()) + objects_accum);
+    objects_accum = objects_accum + objects_remain;
+  }
   PADDLE_ENFORCE(
       static_cast<size_t>(num_objects) == static_cast<size_t>(objects_accum),
       "The requested num of objects " + std::to_string(num_objects) +
...
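The substantive change in this last hunk is the if (batch_remain > 0) guard around the tail copy: when the requested warmup size divides evenly into full batches, the old code still indexed test_data[batches], one element past the last batch it consumed, presumably an out-of-range read. A sketch of the full-batches-plus-tail split that the loop and guard implement (values are illustrative):

#include <cstdint>
#include <iostream>

int main() {
  const int32_t num_images = 25;
  const int32_t test_data_batch_size = 10;

  // Whole batches to copy in full, plus a possibly empty tail.
  const int32_t batches = num_images / test_data_batch_size;       // 2
  const int32_t batch_remain = num_images % test_data_batch_size;  // 5

  for (int32_t i = 0; i < batches; ++i) {
    std::cout << "copy all of test_data[" << i << "]\n";
  }
  if (batch_remain > 0) {  // the guard this commit adds
    std::cout << "copy " << batch_remain << " images from test_data["
              << batches << "]\n";
  }
  return 0;
}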