diff --git a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc
index f956c34f23ac7cc6ca06b9fcf411d0f2e9b29c54..2570325c24abcbb4bd459944480d3279f24fab1f 100644
--- a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc
@@ -245,8 +245,14 @@ TEST(Analyzer_bert, transfer_scope_cache) {
   // Since paddle::framework::global_transfer_scope_cache() and
   // paddle::framework::global_transfer_data_cache() are thread_local,
   // their pointer should be different among different thread id.
-  PADDLE_ENFORCE(global_transfer_scope_cache.size(), threads_num);
-  PADDLE_ENFORCE(global_transfer_data_cache.size(), threads_num);
+  PADDLE_ENFORCE_EQ(
+      global_transfer_scope_cache.size(), threads_num,
+      paddle::platform::errors::Fatal(
+          "The size of scope cache is not equal to thread number."));
+  PADDLE_ENFORCE_EQ(
+      global_transfer_data_cache.size(), threads_num,
+      paddle::platform::errors::Fatal(
+          "The size of data cache is not equal to thread number."));
 }
 
 }  // namespace inference
diff --git a/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc b/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc
index 0bc67aff7af1be9f34ffa2bb71c25d2964a62521..a9c24c4503f9f1b803c0d9fcde21199ef4089c41 100644
--- a/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc
@@ -69,11 +69,13 @@ void PD_run() {
   PD_DeletePaddleTensor(input);
   int size;
   const int* out_shape = PD_GetPaddleTensorShape(out_data, &size);
-  CHECK(size == 2) << "The Output shape's size is NOT match.";
+  PADDLE_ENFORCE_EQ(size, 2, paddle::platform::errors::InvalidArgument(
+                                 "The output shape's size does not match."));
   std::vector<int> ref_outshape_size({9, 6});
   for (int i = 0; i < 2; ++i) {
-    CHECK(out_shape[i] == ref_outshape_size[i])
-        << "The Output's shape is NOT match.";
+    PADDLE_ENFORCE_EQ(out_shape[i], ref_outshape_size[i],
+                      paddle::platform::errors::InvalidArgument(
+                          "The output's shape does not match."));
   }
   PD_DeletePaddleBuf(buf);
 }
diff --git a/paddle/fluid/inference/tests/api/analyzer_capi_tester.cc b/paddle/fluid/inference/tests/api/analyzer_capi_tester.cc
index d76799a679cbf27700c6d9af4f2e2e50c5e33e35..fd20581123c10f8c569e7765c7a0bf17ddd1d0b9 100644
--- a/paddle/fluid/inference/tests/api/analyzer_capi_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_capi_tester.cc
@@ -36,9 +36,9 @@ void zero_copy_run() {
   PD_SwitchIrDebug(config, true);
   PD_SetModel(config, prog_file.c_str(), params_file.c_str());
   bool use_feed_fetch = PD_UseFeedFetchOpsEnabled(config);
-  CHECK(!use_feed_fetch) << "NO";
+  EXPECT_FALSE(use_feed_fetch);
   bool specify_input_names = PD_SpecifyInputName(config);
-  CHECK(specify_input_names) << "NO";
+  EXPECT_TRUE(specify_input_names);
 
   const int batch_size = 1;
   const int channels = 3;
@@ -85,13 +85,13 @@ TEST(PD_AnalysisConfig, profile_mkldnn) {
   PD_SwitchIrDebug(config, true);
   PD_EnableMKLDNN(config);
   bool mkldnn_enable = PD_MkldnnEnabled(config);
-  CHECK(mkldnn_enable) << "NO";
+  EXPECT_TRUE(mkldnn_enable);
   PD_EnableMkldnnQuantizer(config);
   bool quantizer_enable = PD_MkldnnQuantizerEnabled(config);
-  CHECK(quantizer_enable) << "NO";
+  EXPECT_TRUE(quantizer_enable);
   PD_EnableMkldnnBfloat16(config);
   bool bfloat16_enable = PD_MkldnnBfloat16Enabled(config);
-  CHECK(bfloat16_enable) << "NO";
+  EXPECT_TRUE(bfloat16_enable);
   PD_SetMkldnnCacheCapacity(config, 0);
   PD_SetModel(config, prog_file.c_str(), params_file.c_str());
   PD_DeleteAnalysisConfig(config);
diff --git a/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc
index 00a475b6047e8215264c664dd3c775b9687eb0ff..d61c28c30d203acf4dd48e1461a881d61f8ec263 100644
--- a/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc
@@ -126,7 +126,9 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   std::string turn_mask_pre = "turn_mask_";
 
   auto one_batch = data->NextBatch();
-  PADDLE_ENFORCE(!one_batch.response.empty());
+  PADDLE_ENFORCE(
+      !one_batch.response.empty(),
+      paddle::platform::errors::Fatal("The response of one batch is empty."));
   int size = one_batch.response[0].size();
   CHECK_EQ(size, kMaxTurnLen);
   // turn tensor assignment
@@ -214,11 +216,17 @@ void profile(bool use_mkldnn = false) {
                  input_slots_all, &outputs, FLAGS_num_threads);
 
   if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
     auto output = outputs.back();
-    PADDLE_ENFORCE_GT(output.size(), 0);
+    PADDLE_ENFORCE_GT(output.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
     size_t size = GetSize(output[0]);
-    PADDLE_ENFORCE_GT(size, 0);
+    PADDLE_ENFORCE_GT(size, 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
     float *result = static_cast<float *>(output[0].data.data());
     for (size_t i = 0; i < size; i++) {
       EXPECT_NEAR(result[i], result_data[i], 1e-3);
diff --git a/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc b/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
index 7f06a3b9023ba3e907c9731d576f014a3e451113..91a3233b9851f1def7717d04c4c9df5275a805ee 100644
--- a/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
@@ -146,8 +146,9 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
   auto iterations = test_data.size();
   PADDLE_ENFORCE_LE(
       static_cast<size_t>(num_images), iterations * test_data_batch_size,
-      "The requested quantization warmup data size " +
-          std::to_string(num_images) + " is bigger than all test data size.");
+      paddle::platform::errors::Fatal(
+          "The requested quantization warmup data size " +
+          std::to_string(num_images) + " is bigger than all test data size."));
 
   PaddleTensor images;
   images.name = "image";
@@ -237,8 +238,9 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
   }
   PADDLE_ENFORCE_EQ(
       static_cast<size_t>(num_objects), static_cast<size_t>(objects_accum),
-      "The requested num of objects " + std::to_string(num_objects) +
-          " is the same as objects_accum.");
+      paddle::platform::errors::Fatal("The requested num of objects " +
+                                      std::to_string(num_objects) +
+                                      " is not equal to objects_accum."));
 
   auto warmup_data = std::make_shared<std::vector<PaddleTensor>>(4);
   (*warmup_data)[0] = std::move(images);
diff --git a/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc b/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc
index 142905dcd8d9964d93d0c5f7444823eef2b84900..bd3a1d737afb1ba230015fbd602c493f33952ffb 100644
--- a/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc
@@ -98,7 +98,9 @@ void GetOneBatch(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   input_tensor.name = "word";
   input_tensor.dtype = PaddleDType::INT64;
   TensorAssignData<int64_t>(&input_tensor, {one_batch.data}, one_batch.lod);
-  PADDLE_ENFORCE_EQ(batch_size, static_cast<int>(one_batch.lod.size() - 1));
+  PADDLE_ENFORCE_EQ(
+      batch_size, static_cast<int>(one_batch.lod.size() - 1),
+      paddle::platform::errors::Fatal("The lod size of one batch is invalid."));
   input_slots->assign({input_tensor});
 }
@@ -137,12 +139,17 @@ TEST(Analyzer_LAC, profile) {
       24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25, 25, 25, 25, 25, 44, 24,
       25, 25, 25, 36, 42, 43, 44, 14, 15, 44, 14, 15, 44, 14, 15, 44, 38, 39,
       14, 15, 44, 22, 23, 23, 23, 23, 23, 23, 23};
-  PADDLE_ENFORCE_GT(outputs.size(), 0);
+  PADDLE_ENFORCE_GT(outputs.size(), 0,
+                    paddle::platform::errors::Fatal(
+                        "The size of outputs should be greater than 0."));
   auto output = outputs.back();
-  PADDLE_ENFORCE_EQ(output.size(), 1UL);
+  PADDLE_ENFORCE_EQ(output.size(), 1UL,
+                    paddle::platform::errors::Fatal(
+                        "The size of output should be equal to 1."));
   size_t size = GetSize(output[0]);
   size_t batch1_size = sizeof(lac_ref_data) / sizeof(int64_t);
-  PADDLE_ENFORCE_GE(size, batch1_size);
+  PADDLE_ENFORCE_GE(size, batch1_size, paddle::platform::errors::Fatal(
+                                           "The size of batch is invalid."));
   int64_t *pdata = static_cast<int64_t *>(output[0].data.data());
   for (size_t i = 0; i < batch1_size; ++i) {
     EXPECT_EQ(pdata[i], lac_ref_data[i]);
diff --git a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc
index 2a862b1395c222cf6d23216c9d4cf9196ffb519c..50a68361d536f5aab3ed2a6bafe60f2438a9c129 100644
--- a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc
@@ -117,11 +117,17 @@ void profile(bool memory_load = false) {
     // the first inference result
     const int chinese_ner_result_data[] = {30, 45, 41, 48, 17, 26,
                                            48, 39, 38, 16, 25};
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
     auto output = outputs.back();
-    PADDLE_ENFORCE_EQ(output.size(), 1UL);
+    PADDLE_ENFORCE_EQ(output.size(), 1UL,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be equal to 1."));
     size_t size = GetSize(output[0]);
-    PADDLE_ENFORCE_GT(size, 0);
+    PADDLE_ENFORCE_GT(size, 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
     int64_t *result = static_cast<int64_t *>(output[0].data.data());
     for (size_t i = 0; i < std::min<size_t>(11, size); i++) {
       EXPECT_EQ(result[i], chinese_ner_result_data[i]);
diff --git a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc
index 06a8e01b10c6eb70fe2cbac19725d96281863c29..bb1f0e8cd6334bab83973fb7d314f7017edd9e90 100644
--- a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc
@@ -136,11 +136,17 @@ TEST(Analyzer_Pyramid_DNN, profile) {
                  input_slots_all, &outputs, FLAGS_num_threads);
 
   if (FLAGS_num_threads == 1 && !FLAGS_test_all_data && !FLAGS_zero_copy) {
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
     auto output = outputs.back();
-    PADDLE_ENFORCE_EQ(output.size(), 1UL);
+    PADDLE_ENFORCE_EQ(output.size(), 1UL,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be equal to 1."));
     size_t size = GetSize(output[0]);
-    PADDLE_ENFORCE_GT(size, 0);
+    PADDLE_ENFORCE_GT(size, 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
     float *result = static_cast<float *>(output[0].data.data());
     // output is probability, which is in (0, 1).
     for (size_t i = 0; i < size; i++) {
diff --git a/paddle/fluid/inference/tests/api/analyzer_rnn2_tester.cc b/paddle/fluid/inference/tests/api/analyzer_rnn2_tester.cc
index 9ccbf58cbd2bbaab9b1a132c27e50356e1a5df37..34a0a5f398d7fee0f8e44f0ad59ff9711263b575 100644
--- a/paddle/fluid/inference/tests/api/analyzer_rnn2_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_rnn2_tester.cc
@@ -135,11 +135,17 @@ TEST(Analyzer_rnn2, profile) {
 
   if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
     // the first inference result
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
     auto output = outputs.back();
-    PADDLE_ENFORCE_GT(output.size(), 0);
+    PADDLE_ENFORCE_GT(output.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
     size_t size = GetSize(output[0]);
-    PADDLE_ENFORCE_GT(size, 0);
+    PADDLE_ENFORCE_GT(size, 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
     float *result = static_cast<float *>(output[0].data.data());
     for (size_t i = 0; i < size; i++) {
       EXPECT_NEAR(result[i], result_data[i], 1e-3);
diff --git a/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc
index e3f8b835f78371170aaf107e1b2d1ca41b300e56..978aaf1c6a32d5b4ec8f2d06b8873af892705da5 100644
--- a/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc
@@ -47,7 +47,8 @@ struct DataRecord {
       num_lines++;
       std::vector<std::string> data;
       split(line, '\t', &data);
-      PADDLE_ENFORCE(data.size() >= 4);
+      PADDLE_ENFORCE_GE(data.size(), 4, paddle::platform::errors::Fatal(
+                                            "The size of data is invalid."));
       // load title1 data
       std::vector<int64_t> title1_data;
       split_to_int64(data[0], ' ', &title1_data);
@@ -120,11 +121,17 @@ TEST(Analyzer_seq_conv1, profile) {
 
   if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
     // the first inference result
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
     auto output = outputs.back();
-    PADDLE_ENFORCE_EQ(output.size(), 1UL);
+    PADDLE_ENFORCE_EQ(output.size(), 1UL,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be equal to 1."));
     size_t size = GetSize(output[0]);
-    PADDLE_ENFORCE_GT(size, 0);
+    PADDLE_ENFORCE_GT(size, 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
     float *result = static_cast<float *>(output[0].data.data());
     // output is probability, which is in (0, 1).
     for (size_t i = 0; i < size; i++) {
diff --git a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc
index 56f706ae56bda8b06eba5dd9e080552aa9785c6e..9f1556cdb871aa3e5bbe613aa98299c162661c42 100644
--- a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc
@@ -56,20 +56,26 @@ struct DataRecord {
      std::vector<float> slot_data;
       split_to_float(data[1], ' ', &slot_data);
       std::string name = data[0];
-      PADDLE_ENFORCE_EQ(slot_data.size() % 11, 0UL,
-                        "line %d, %s should be divisible", num_lines, name);
+      PADDLE_ENFORCE_EQ(
+          slot_data.size() % 11, 0UL,
+          paddle::platform::errors::Fatal(
+              "line %d, %s should be divisible by 11", num_lines, name));
       datasets[name].emplace_back(std::move(slot_data));
     }
     num_samples = num_lines / num_slots;
-    PADDLE_ENFORCE_EQ(num_samples * num_slots, static_cast<size_t>(num_lines),
-                      "num samples should be divisible");
-    PADDLE_ENFORCE_GT(num_samples, 0UL);
+    PADDLE_ENFORCE_EQ(
+        num_samples * num_slots, static_cast<size_t>(num_lines),
+        paddle::platform::errors::Fatal(
+            "num_lines should be divisible by num_slots"));
+    PADDLE_ENFORCE_GT(num_samples, 0UL,
+                      paddle::platform::errors::Fatal(
+                          "The num of samples should be greater than 0."));
   }
 
   void Prepare(int bs) {
     for (auto it = datasets.begin(); it != datasets.end(); ++it) {
-      PADDLE_ENFORCE_EQ(it->second.size(), num_samples,
-                        "size of each slot should be equal");
+      PADDLE_ENFORCE_EQ(
+          it->second.size(), num_samples,
+          paddle::platform::errors::Fatal("size of each slot should be equal"));
     }
     size_t num_batches = num_samples / bs;
     EXPECT_GT(num_batches, 0UL);
@@ -90,8 +96,10 @@ struct DataRecord {
         std::copy(datas[id].begin(), datas[id].end(),
                   std::back_inserter(slot.data[k]));
         size_t len = datas[id].size() / 11;
-        PADDLE_ENFORCE_EQ(len * 11, datas[id].size(),
-                          "%s %d size should be divisible", slot.name, id);
+        PADDLE_ENFORCE_EQ(
+            len * 11, datas[id].size(),
+            paddle::platform::errors::Fatal(
+                "%s %d size should be divisible by 11", slot.name, id));
         lod[k + 1] = lod[k] + len;
       }
       slot.shape.assign({static_cast<int>(lod[bs]), 11});
diff --git a/paddle/fluid/inference/tests/api/analyzer_text_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_text_classification_tester.cc
index 78e500b2ed530d5a1dce8a7927538fdd0bbb6907..ae38bcbc20a9f44eb6ef5c313b318dec38a30550 100644
--- a/paddle/fluid/inference/tests/api/analyzer_text_classification_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_text_classification_tester.cc
@@ -22,7 +22,9 @@ struct DataReader {
       : file(new std::ifstream(path)) {}
 
   bool NextBatch(std::vector<PaddleTensor> *input, int batch_size) {
-    PADDLE_ENFORCE_EQ(batch_size, 1);
+    PADDLE_ENFORCE_EQ(batch_size, 1,
+                      paddle::platform::errors::Fatal(
+                          "The size of batch should be equal to 1."));
     std::string line;
     PaddleTensor tensor;
     tensor.dtype = PaddleDType::INT64;
@@ -81,7 +83,9 @@ TEST(Analyzer_Text_Classification, profile) {
 
   if (FLAGS_num_threads == 1) {
     // Get output
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
     LOG(INFO) << "get outputs " << outputs.back().size();
     for (auto &output : outputs.back()) {
       LOG(INFO) << "output.shape: " << to_string(output.shape);
diff --git a/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc b/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc
index 65755b7b15ad54e38e398a82db41a0b9d8fc59e3..a2ced21a9ac9ad10c2b067a60597eee9fdff9eeb 100644
--- a/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc
@@ -59,7 +59,9 @@ void SetConfig(AnalysisConfig *cfg) {
 }
 
 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
-  PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data.");
+  PADDLE_ENFORCE_EQ(
+      FLAGS_test_all_data, 0,
+      paddle::platform::errors::Fatal(
+          "Only a single batch of data is supported."));
   std::string line;
   std::ifstream file(FLAGS_infer_data);
   std::getline(file, line);
@@ -99,7 +101,9 @@ void profile(bool use_mkldnn = false) {
   auto refer = ProcessALine(line);
   file.close();
 
-  PADDLE_ENFORCE_GT(outputs.size(), 0);
+  PADDLE_ENFORCE_GT(outputs.size(), 0,
+                    paddle::platform::errors::Fatal(
+                        "The size of outputs should be greater than 0."));
   auto &output = outputs.back().front();
   size_t numel = output.data.length() / PaddleDtypeSize(output.dtype);
   CHECK_EQ(numel, refer.data.size());
diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h
index 7183cbac71562bfe4092bf78270096996b74c525..d27959aff6fe2c0167722d0a875fec441ccc6b03 100644
--- a/paddle/fluid/inference/tests/test_helper.h
+++ b/paddle/fluid/inference/tests/test_helper.h
@@ -21,6 +21,7 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/inference/io.h"
+#include "paddle/fluid/platform/errors.h"
 #include "paddle/fluid/platform/port.h"
 #include "paddle/fluid/platform/profiler.h"
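
Note on the pattern above: every converted check now passes a typed error object (paddle::platform::errors::Fatal or errors::InvalidArgument) as the third argument, so a failed comparison reports both operands plus a human-readable message instead of a bare boolean failure. A minimal standalone sketch of that idea, built only on the C++ standard library; the ENFORCE_EQ macro and EnforceNotMet type below are illustrative stand-ins, not Paddle's actual implementation (the real macros live in paddle/fluid/platform/enforce.h):

#include <sstream>
#include <stdexcept>
#include <string>

// Stand-in for the exception Paddle throws when an enforce fails.
struct EnforceNotMet : std::runtime_error {
  explicit EnforceNotMet(const std::string &msg) : std::runtime_error(msg) {}
};

// Compare two values; on mismatch, throw an exception whose message carries
// both operands and a caller-supplied hint. This is what the three-argument
// PADDLE_ENFORCE_EQ(a, b, errors::Fatal(...)) form adds over the old
// two-argument usage, which reported nothing about why the check failed.
#define ENFORCE_EQ(a, b, hint)                                        \
  do {                                                                \
    const auto &lhs_ = (a);                                           \
    const auto &rhs_ = (b);                                           \
    if (!(lhs_ == rhs_)) {                                            \
      std::ostringstream oss_;                                        \
      oss_ << "Enforce failed: " #a " == " #b " (" << lhs_ << " vs "  \
           << rhs_ << "). " << (hint);                                \
      throw EnforceNotMet(oss_.str());                                \
    }                                                                 \
  } while (0)

int main() {
  std::size_t outputs_size = 1;
  // Passes: the sizes match, so nothing is thrown.
  ENFORCE_EQ(outputs_size, 1u, "The size of output should be equal to 1.");
  // Would throw EnforceNotMet with a message naming both operands:
  // ENFORCE_EQ(outputs_size, 2u, "The size of output should be equal to 2.");
  return 0;
}

The same trade-off explains the EXPECT_TRUE/EXPECT_FALSE conversions in analyzer_capi_tester.cc: gtest assertions already print the failing expression, so the redundant << "NO" messages could simply be dropped.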