diff --git a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
index e8224be2d495dafba46ce4bbb9537e8dcc993a8c..47697077802cc609768dab1ff1121cc5d601ef9e 100644
--- a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
@@ -32,7 +32,7 @@ TEST(inference, fit_a_line) {
   // The input data should be >= 0
   int64_t batch_size = 10;
   SetupTensor<float>(
-      input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));
+      &input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
 
@@ -51,7 +51,7 @@ TEST(inference, fit_a_line) {
   cpu_fetchs2.push_back(&output2);
 
   // Run inference on CUDA GPU
-  LOG(INFO) << "--- CPU Runs: ---";
+  LOG(INFO) << "--- GPU Runs: ---";
   TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2);
   LOG(INFO) << output2.dims();
 
@@ -79,10 +79,8 @@ TEST(multi_thread_inference, fit_a_line) {
     // The second dim of the input tensor should be 13
     // The input data should be >= 0
     int64_t batch_size = 10;
-    SetupTensor<float>(*input,
-                       {batch_size, 13},
-                       static_cast<float>(0),
-                       static_cast<float>(10));
+    SetupTensor<float>(
+        input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));
     cpu_feeds[i].push_back(input);
   }
 
@@ -112,6 +110,7 @@ TEST(multi_thread_inference, fit_a_line) {
       dirname, cpu_feeds, cpu_fetchs2, num_threads);
 
   for (int i = 0; i < num_threads; ++i) {
+    CheckError<float>(*cpu_fetchs1[i][0], *cpu_fetchs2[i][0]);
     delete cpu_fetchs2[i][0];
   }
 #endif
diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h
index 95c526352c4c3209e8d062be09a7a09954cb30d4..a472ee68c0da00cbceca538b953255b180f2134a 100644
--- a/paddle/fluid/inference/tests/test_helper.h
+++ b/paddle/fluid/inference/tests/test_helper.h
@@ -15,6 +15,7 @@ limitations under the License. */
 
 #pragma once
 #include <time.h>
+#include <cstdint>
 #include <map>
 #include <string>
 #include <vector>
@@ -28,7 +29,7 @@ void SetupTensor(paddle::framework::LoDTensor* input,
                  const T lower,
                  const T upper) {
   T* input_ptr = input->mutable_data<T>(dims, paddle::platform::CPUPlace());
-  unsigned int seed = time(NULL);
+  unsigned int seed = reinterpret_cast<uintptr_t>(input);
   for (int i = 0; i < input->numel(); ++i) {
     input_ptr[i] = (static_cast<T>(rand_r(&seed)) / static_cast<T>(RAND_MAX)) *
                        (upper - lower) +