From 27f553b37716ce3074cc75747e93f55fbccc68bb Mon Sep 17 00:00:00 2001
From: Liu Yiqun
Date: Wed, 4 Apr 2018 06:12:56 +0000
Subject: [PATCH] Add the check of CPU results and GPU results in multi-thread
 unittest.

---
 .../inference/tests/book/test_inference_fit_a_line.cc | 11 +++++------
 paddle/fluid/inference/tests/test_helper.h             |  3 ++-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
index e8224be2d4..4769707780 100644
--- a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
@@ -32,7 +32,7 @@ TEST(inference, fit_a_line) {
   // The input data should be >= 0
   int64_t batch_size = 10;
   SetupTensor<float>(
-      input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));
+      &input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);

@@ -51,7 +51,7 @@
   cpu_fetchs2.push_back(&output2);

   // Run inference on CUDA GPU
-  LOG(INFO) << "--- CPU Runs: ---";
+  LOG(INFO) << "--- GPU Runs: ---";
   TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2);
   LOG(INFO) << output2.dims();

@@ -79,10 +79,8 @@ TEST(multi_thread_inference, fit_a_line) {
     // The second dim of the input tensor should be 13
     // The input data should be >= 0
     int64_t batch_size = 10;
-    SetupTensor<float>(*input,
-                       {batch_size, 13},
-                       static_cast<float>(0),
-                       static_cast<float>(10));
+    SetupTensor<float>(
+        input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));
     cpu_feeds[i].push_back(input);
   }

@@ -112,6 +110,7 @@
       dirname, cpu_feeds, cpu_fetchs2, num_threads);

   for (int i = 0; i < num_threads; ++i) {
+    CheckError<float>(*cpu_fetchs1[i][0], *cpu_fetchs2[i][0]);
     delete cpu_fetchs2[i][0];
   }
 #endif
diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h
index 95c526352c..a472ee68c0 100644
--- a/paddle/fluid/inference/tests/test_helper.h
+++ b/paddle/fluid/inference/tests/test_helper.h
@@ -15,6 +15,7 @@ limitations under the License. */
 #pragma once

 #include
+#include
 #include
 #include
 #include
@@ -28,7 +29,7 @@ void SetupTensor(paddle::framework::LoDTensor* input,
                  const T lower,
                  const T upper) {
   T* input_ptr = input->mutable_data<T>(dims, paddle::platform::CPUPlace());
-  unsigned int seed = time(NULL);
+  unsigned int seed = reinterpret_cast(input);
   for (int i = 0; i < input->numel(); ++i) {
     input_ptr[i] = (static_cast<T>(rand_r(&seed)) / static_cast<T>(RAND_MAX)) *
                        (upper - lower) +
--
GitLab
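
Note (not part of the patch above): the test_helper.h hunk replaces the time(NULL) seed for rand_r() with a value derived from the input tensor's address, so the random data written into a tensor depends on the tensor object rather than on when the test runs, and each thread's freshly allocated input gets its own, reproducible sequence. Below is a minimal standalone C++ sketch of that seeding pattern; the function FillRandom and the std::vector buffers are illustrative stand-ins rather than PaddlePaddle APIs, and the exact cast used in the patch is assumed.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Hypothetical stand-in for SetupTensor: fill `data` with values in
// [lower, upper), seeding rand_r() from the buffer's own address so the
// contents depend on the buffer, not on the current wall-clock time.
void FillRandom(std::vector<float>* data, float lower, float upper) {
  unsigned int seed =
      static_cast<unsigned int>(reinterpret_cast<std::uintptr_t>(data));
  for (std::size_t i = 0; i < data->size(); ++i) {
    (*data)[i] = static_cast<float>(rand_r(&seed)) /
                     static_cast<float>(RAND_MAX) * (upper - lower) +
                 lower;
  }
}

int main() {
  std::vector<float> a(5), b(5);
  FillRandom(&a, 0.0f, 10.0f);
  FillRandom(&b, 0.0f, 10.0f);
  // Different buffers usually get different data (different seeds).
  std::printf("a[0]=%f b[0]=%f\n", a[0], b[0]);

  // Refilling the same buffer reproduces exactly the same values, so two
  // passes over the same input (e.g. a CPU run and a GPU run) see identical
  // data and their outputs can be compared element by element.
  std::vector<float> before = a;
  FillRandom(&a, 0.0f, 10.0f);
  std::printf("reproducible: %d\n", static_cast<int>(a == before));
  return 0;
}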