Unverified commit bd77a425 authored by 石晓伟, committed by GitHub

error messages of inference/tests, test=develop (#27259)

Parent bd41c314
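
The change applied throughout this commit is uniform: bare PADDLE_ENFORCE(cond) and glog-style CHECK(x) << "msg" assertions in the inference tests are replaced with comparison macros (PADDLE_ENFORCE_EQ/GT/GE/LE) that carry a structured error from paddle::platform::errors. A minimal before/after sketch of the idiom (the checked values are illustrative, not taken from one specific file):

// Before: condition-only check; on failure there is no useful context.
PADDLE_ENFORCE_GT(outputs.size(), 0);
// After: typed comparison macro plus a readable, categorized error message.
PADDLE_ENFORCE_GT(outputs.size(), 0,
                  paddle::platform::errors::Fatal(
                      "The size of outputs should be greater than 0."));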
@@ -245,8 +245,14 @@ TEST(Analyzer_bert, transfer_scope_cache) {
// Since paddle::framework::global_transfer_scope_cache() and
// paddle::framework::global_transfer_data_cache() are thread_local,
// their pointer should be different among different thread id.
-  PADDLE_ENFORCE(global_transfer_scope_cache.size(), threads_num);
-  PADDLE_ENFORCE(global_transfer_data_cache.size(), threads_num);
+  PADDLE_ENFORCE_EQ(
+      global_transfer_scope_cache.size(), threads_num,
+      paddle::platform::errors::Fatal(
+          "The size of scope cache is not equal to thread number."));
+  PADDLE_ENFORCE_EQ(
+      global_transfer_data_cache.size(), threads_num,
+      paddle::platform::errors::Fatal(
+          "The size of data cache is not equal to thread number."));
}
} // namespace inference
......
@@ -69,11 +69,13 @@ void PD_run() {
PD_DeletePaddleTensor(input);
int size;
const int* out_shape = PD_GetPaddleTensorShape(out_data, &size);
-  CHECK(size == 2) << "The Output shape's size is NOT match.";
+  PADDLE_ENFORCE_EQ(size, 2, paddle::platform::errors::InvalidArgument(
+                                 "The Output shape's size does NOT match."));
std::vector<int> ref_outshape_size({9, 6});
for (int i = 0; i < 2; ++i) {
-    CHECK(out_shape[i] == ref_outshape_size[i])
-        << "The Output's shape is NOT match.";
+    PADDLE_ENFORCE_EQ(out_shape[i], ref_outshape_size[i],
+                      paddle::platform::errors::InvalidArgument(
+                          "The Output's shape does NOT match."));
}
PD_DeletePaddleBuf(buf);
}
......
@@ -36,9 +36,9 @@ void zero_copy_run() {
PD_SwitchIrDebug(config, true);
PD_SetModel(config, prog_file.c_str(), params_file.c_str());
bool use_feed_fetch = PD_UseFeedFetchOpsEnabled(config);
-  CHECK(!use_feed_fetch) << "NO";
+  EXPECT_FALSE(use_feed_fetch);
  bool specify_input_names = PD_SpecifyInputName(config);
-  CHECK(specify_input_names) << "NO";
+  EXPECT_TRUE(specify_input_names);
const int batch_size = 1;
const int channels = 3;
@@ -85,13 +85,13 @@ TEST(PD_AnalysisConfig, profile_mkldnn) {
PD_SwitchIrDebug(config, true);
PD_EnableMKLDNN(config);
bool mkldnn_enable = PD_MkldnnEnabled(config);
-  CHECK(mkldnn_enable) << "NO";
+  EXPECT_TRUE(mkldnn_enable);
  PD_EnableMkldnnQuantizer(config);
  bool quantizer_enable = PD_MkldnnQuantizerEnabled(config);
-  CHECK(quantizer_enable) << "NO";
+  EXPECT_TRUE(quantizer_enable);
  PD_EnableMkldnnBfloat16(config);
  bool bfloat16_enable = PD_MkldnnBfloat16Enabled(config);
-  CHECK(bfloat16_enable) << "NO";
+  EXPECT_TRUE(bfloat16_enable);
PD_SetMkldnnCacheCapacity(config, 0);
PD_SetModel(config, prog_file.c_str(), params_file.c_str());
PD_DeleteAnalysisConfig(config);
......
@@ -126,7 +126,9 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
std::string turn_mask_pre = "turn_mask_";
auto one_batch = data->NextBatch();
-  PADDLE_ENFORCE(!one_batch.response.empty());
+  PADDLE_ENFORCE(
+      !one_batch.response.empty(),
+      paddle::platform::errors::Fatal("The response of one batch is empty."));
int size = one_batch.response[0].size();
CHECK_EQ(size, kMaxTurnLen);
// turn tensor assignment
@@ -214,11 +216,17 @@ void profile(bool use_mkldnn = false) {
input_slots_all, &outputs, FLAGS_num_threads);
if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
    auto output = outputs.back();
-    PADDLE_ENFORCE_GT(output.size(), 0);
+    PADDLE_ENFORCE_GT(output.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
    size_t size = GetSize(output[0]);
-    PADDLE_ENFORCE_GT(size, 0);
+    PADDLE_ENFORCE_GT(size, 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
float *result = static_cast<float *>(output[0].data.data());
for (size_t i = 0; i < size; i++) {
EXPECT_NEAR(result[i], result_data[i], 1e-3);
......
@@ -146,8 +146,9 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
auto iterations = test_data.size();
PADDLE_ENFORCE_LE(
static_cast<size_t>(num_images), iterations * test_data_batch_size,
-      "The requested quantization warmup data size " +
-          std::to_string(num_images) + " is bigger than all test data size.");
+      paddle::platform::errors::Fatal(
+          "The requested quantization warmup data size " +
+          std::to_string(num_images) +
+          " is bigger than the total test data size."));
PaddleTensor images;
images.name = "image";
@@ -237,8 +238,9 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
}
PADDLE_ENFORCE_EQ(
static_cast<size_t>(num_objects), static_cast<size_t>(objects_accum),
"The requested num of objects " + std::to_string(num_objects) +
" is the same as objects_accum.");
paddle::platform::errors::Fatal("The requested num of objects " +
std::to_string(num_objects) +
" is the same as objects_accum."));
auto warmup_data = std::make_shared<std::vector<PaddleTensor>>(4);
(*warmup_data)[0] = std::move(images);
......
@@ -98,7 +98,9 @@ void GetOneBatch(std::vector<PaddleTensor> *input_slots, DataRecord *data,
input_tensor.name = "word";
input_tensor.dtype = PaddleDType::INT64;
TensorAssignData<int64_t>(&input_tensor, {one_batch.data}, one_batch.lod);
-  PADDLE_ENFORCE_EQ(batch_size, static_cast<int>(one_batch.lod.size() - 1));
+  PADDLE_ENFORCE_EQ(
+      batch_size, static_cast<int>(one_batch.lod.size() - 1),
+      paddle::platform::errors::Fatal("The lod size of one batch is invalid."));
input_slots->assign({input_tensor});
}
@@ -137,12 +139,17 @@ TEST(Analyzer_LAC, profile) {
24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25, 25, 25, 25, 25,
44, 24, 25, 25, 25, 36, 42, 43, 44, 14, 15, 44, 14, 15, 44, 14,
15, 44, 38, 39, 14, 15, 44, 22, 23, 23, 23, 23, 23, 23, 23};
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
    auto output = outputs.back();
-    PADDLE_ENFORCE_EQ(output.size(), 1UL);
+    PADDLE_ENFORCE_EQ(output.size(), 1UL,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be equal to 1."));
    size_t size = GetSize(output[0]);
    size_t batch1_size = sizeof(lac_ref_data) / sizeof(int64_t);
-    PADDLE_ENFORCE_GE(size, batch1_size);
+    PADDLE_ENFORCE_GE(size, batch1_size,
+                      paddle::platform::errors::Fatal(
+                          "The size of batch is invalid."));
int64_t *pdata = static_cast<int64_t *>(output[0].data.data());
for (size_t i = 0; i < batch1_size; ++i) {
EXPECT_EQ(pdata[i], lac_ref_data[i]);
......
@@ -117,11 +117,17 @@ void profile(bool memory_load = false) {
// the first inference result
const int chinese_ner_result_data[] = {30, 45, 41, 48, 17, 26,
48, 39, 38, 16, 25};
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
    auto output = outputs.back();
-    PADDLE_ENFORCE_EQ(output.size(), 1UL);
+    PADDLE_ENFORCE_EQ(output.size(), 1UL,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be equal to 1."));
    size_t size = GetSize(output[0]);
-    PADDLE_ENFORCE_GT(size, 0);
+    PADDLE_ENFORCE_GT(size, 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
int64_t *result = static_cast<int64_t *>(output[0].data.data());
for (size_t i = 0; i < std::min<size_t>(11, size); i++) {
EXPECT_EQ(result[i], chinese_ner_result_data[i]);
......
@@ -136,11 +136,17 @@ TEST(Analyzer_Pyramid_DNN, profile) {
input_slots_all, &outputs, FLAGS_num_threads);
if (FLAGS_num_threads == 1 && !FLAGS_test_all_data && !FLAGS_zero_copy) {
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
    auto output = outputs.back();
-    PADDLE_ENFORCE_EQ(output.size(), 1UL);
+    PADDLE_ENFORCE_EQ(output.size(), 1UL,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be equal to 1."));
    size_t size = GetSize(output[0]);
-    PADDLE_ENFORCE_GT(size, 0);
+    PADDLE_ENFORCE_GT(size, 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
float *result = static_cast<float *>(output[0].data.data());
// output is probability, which is in (0, 1).
for (size_t i = 0; i < size; i++) {
......
@@ -135,11 +135,17 @@ TEST(Analyzer_rnn2, profile) {
if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
// the first inference result
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
    auto output = outputs.back();
-    PADDLE_ENFORCE_GT(output.size(), 0);
+    PADDLE_ENFORCE_GT(output.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
    size_t size = GetSize(output[0]);
-    PADDLE_ENFORCE_GT(size, 0);
+    PADDLE_ENFORCE_GT(size, 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
float *result = static_cast<float *>(output[0].data.data());
for (size_t i = 0; i < size; i++) {
EXPECT_NEAR(result[i], result_data[i], 1e-3);
......
@@ -47,7 +47,8 @@ struct DataRecord {
num_lines++;
std::vector<std::string> data;
split(line, '\t', &data);
-      PADDLE_ENFORCE(data.size() >= 4);
+      PADDLE_ENFORCE_GE(data.size(), 4,
+                        paddle::platform::errors::Fatal(
+                            "Each line should contain at least 4 fields."));
// load title1 data
std::vector<int64_t> title1_data;
split_to_int64(data[0], ' ', &title1_data);
@@ -120,11 +121,17 @@ TEST(Analyzer_seq_conv1, profile) {
if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
// the first inference result
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
    auto output = outputs.back();
-    PADDLE_ENFORCE_EQ(output.size(), 1UL);
+    PADDLE_ENFORCE_EQ(output.size(), 1UL,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be equal to 1."));
    size_t size = GetSize(output[0]);
-    PADDLE_ENFORCE_GT(size, 0);
+    PADDLE_ENFORCE_GT(size, 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of output should be greater than 0."));
float *result = static_cast<float *>(output[0].data.data());
// output is probability, which is in (0, 1).
for (size_t i = 0; i < size; i++) {
......
@@ -56,20 +56,26 @@ struct DataRecord {
std::vector<float> slot_data;
split_to_float(data[1], ' ', &slot_data);
std::string name = data[0];
-      PADDLE_ENFORCE_EQ(slot_data.size() % 11, 0UL,
-                        "line %d, %s should be divisible", num_lines, name);
+      PADDLE_ENFORCE_EQ(
+          slot_data.size() % 11, 0UL,
+          paddle::platform::errors::Fatal(
+              "line %d, %s: slot data size should be divisible by 11",
+              num_lines, name));
datasets[name].emplace_back(std::move(slot_data));
}
num_samples = num_lines / num_slots;
-    PADDLE_ENFORCE_EQ(num_samples * num_slots, static_cast<size_t>(num_lines),
-                      "num samples should be divisible");
-    PADDLE_ENFORCE_GT(num_samples, 0UL);
+    PADDLE_ENFORCE_EQ(
+        num_samples * num_slots, static_cast<size_t>(num_lines),
+        paddle::platform::errors::Fatal(
+            "The number of lines should be divisible by the number of slots."));
+    PADDLE_ENFORCE_GT(num_samples, 0UL,
+                      paddle::platform::errors::Fatal(
+                          "The num of samples should be greater than 0."));
}
void Prepare(int bs) {
for (auto it = datasets.begin(); it != datasets.end(); ++it) {
-      PADDLE_ENFORCE_EQ(it->second.size(), num_samples,
-                        "size of each slot should be equal");
+      PADDLE_ENFORCE_EQ(
+          it->second.size(), num_samples,
+          paddle::platform::errors::Fatal(
+              "The size of each slot should be equal."));
}
size_t num_batches = num_samples / bs;
EXPECT_GT(num_batches, 0UL);
@@ -90,8 +96,10 @@ struct DataRecord {
std::copy(datas[id].begin(), datas[id].end(),
std::back_inserter(slot.data[k]));
size_t len = datas[id].size() / 11;
-        PADDLE_ENFORCE_EQ(len * 11, datas[id].size(),
-                          "%s %d size should be divisible", slot.name, id);
+        PADDLE_ENFORCE_EQ(
+            len * 11, datas[id].size(),
+            paddle::platform::errors::Fatal(
+                "%s %d: data size should be divisible by 11", slot.name, id));
lod[k + 1] = lod[k] + len;
}
slot.shape.assign({static_cast<int>(lod[bs]), 11});
......
@@ -22,7 +22,9 @@ struct DataReader {
: file(new std::ifstream(path)) {}
bool NextBatch(std::vector<PaddleTensor> *input, int batch_size) {
-    PADDLE_ENFORCE_EQ(batch_size, 1);
+    PADDLE_ENFORCE_EQ(batch_size, 1,
+                      paddle::platform::errors::Fatal(
+                          "The batch size should be equal to 1."));
std::string line;
PaddleTensor tensor;
tensor.dtype = PaddleDType::INT64;
@@ -81,7 +83,9 @@ TEST(Analyzer_Text_Classification, profile) {
if (FLAGS_num_threads == 1) {
// Get output
-    PADDLE_ENFORCE_GT(outputs.size(), 0);
+    PADDLE_ENFORCE_GT(outputs.size(), 0,
+                      paddle::platform::errors::Fatal(
+                          "The size of outputs should be greater than 0."));
LOG(INFO) << "get outputs " << outputs.back().size();
for (auto &output : outputs.back()) {
LOG(INFO) << "output.shape: " << to_string(output.shape);
......
@@ -59,7 +59,9 @@ void SetConfig(AnalysisConfig *cfg) {
}
void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
-  PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data.");
+  PADDLE_ENFORCE_EQ(
+      FLAGS_test_all_data, 0,
+      paddle::platform::errors::Fatal(
+          "Only a single batch of data is available."));
std::string line;
std::ifstream file(FLAGS_infer_data);
std::getline(file, line);
@@ -99,7 +101,9 @@ void profile(bool use_mkldnn = false) {
auto refer = ProcessALine(line);
file.close();
-  PADDLE_ENFORCE_GT(outputs.size(), 0);
+  PADDLE_ENFORCE_GT(outputs.size(), 0,
+                    paddle::platform::errors::Fatal(
+                        "The size of outputs should be greater than 0."));
auto &output = outputs.back().front();
size_t numel = output.data.length() / PaddleDtypeSize(output.dtype);
CHECK_EQ(numel, refer.data.size());
......
@@ -21,6 +21,7 @@ limitations under the License. */
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/errors.h"
#include "paddle/fluid/platform/port.h"
#include "paddle/fluid/platform/profiler.h"
......
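
Where the old assertion expressed a test expectation rather than a runtime precondition, the commit switches to gtest macros instead of PADDLE_ENFORCE. A sketch of that second idiom, assuming a gtest test body like the PD_AnalysisConfig tests above:

// Before: CHECK aborts the whole process and logs only "NO" on failure.
CHECK(mkldnn_enable) << "NO";
// After: EXPECT_TRUE records a readable failure and lets the suite continue.
EXPECT_TRUE(mkldnn_enable);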