From 746a62ebe6db33ea220ac5c8090439decfab8f64 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Thu, 7 Jun 2018 17:31:13 +0800
Subject: [PATCH] add gpu tests

---
 .../inference/demo/simple_on_word2vec.cc      | 10 +++--
 .../test_paddle_inference_api_impl.cc         | 39 ++++++++++++++++---
 2 files changed, 39 insertions(+), 10 deletions(-)

diff --git a/paddle/contrib/inference/demo/simple_on_word2vec.cc b/paddle/contrib/inference/demo/simple_on_word2vec.cc
index a4ef3b71c53..9c36aa44ec7 100644
--- a/paddle/contrib/inference/demo/simple_on_word2vec.cc
+++ b/paddle/contrib/inference/demo/simple_on_word2vec.cc
@@ -66,12 +66,12 @@ void Main(bool use_gpu) {
   }
 }
 
-void MainThreads(int num_threads) {
+void MainThreads(int num_threads, bool use_gpu) {
   // Multi-threads only support on CPU
   // 0. Create PaddlePredictor with a config.
   NativeConfig config;
   config.model_dir = FLAGS_dirname + "word2vec.inference.model";
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   auto main_predictor = CreatePaddlePredictor(config);
 
@@ -113,11 +113,13 @@
 }
 
 TEST(demo, word2vec_cpu) { Main(false /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_cpu_1) { MainThreads(1); }
-TEST(demo_multi_threads, word2vec_cpu_4) { MainThreads(4); }
+TEST(demo_multi_threads, word2vec_cpu_1) { MainThreads(1, false /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_cpu_4) { MainThreads(4, false /*use_gpu*/); }
 
 #ifdef PADDLE_WITH_CUDA
 TEST(demo, word2vec_gpu) { Main(true /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_gpu_1) { MainThreads(1, true /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_gpu_4) { MainThreads(4, true /*use_gpu*/); }
 #endif
 
 }  // namespace demo
diff --git a/paddle/contrib/inference/test_paddle_inference_api_impl.cc b/paddle/contrib/inference/test_paddle_inference_api_impl.cc
index 8ffe102cb91..4b6cb7b051d 100644
--- a/paddle/contrib/inference/test_paddle_inference_api_impl.cc
+++ b/paddle/contrib/inference/test_paddle_inference_api_impl.cc
@@ -56,9 +56,10 @@ NativeConfig GetConfig() {
   return config;
 }
 
-TEST(paddle_inference_api_impl, word2vec) {
+void MainWord2Vec(bool use_gpu) {
   NativeConfig config = GetConfig();
+  config.use_gpu = use_gpu;
   auto predictor = CreatePaddlePredictor(config);
 
   framework::LoDTensor first_word, second_word, third_word, fourth_word;
   framework::LoD lod{{0, 1}};
@@ -106,11 +107,12 @@
   free(outputs[0].data.data);
 }
 
-TEST(paddle_inference_api_impl, image_classification) {
+void MainImageClassification(bool use_gpu) {
   int batch_size = 2;
   bool use_mkldnn = false;
   bool repeat = false;
   NativeConfig config = GetConfig();
+  config.use_gpu = use_gpu;
   config.model_dir = FLAGS_dirname +
                      "image_classification_resnet.inference.model";
 
@@ -155,9 +157,9 @@
   free(data);
 }
 
-TEST(paddle_inference_api_native_multithreads, word2vec) {
+void MainThreadsWord2Vec(bool use_gpu) {
   NativeConfig config = GetConfig();
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   auto main_predictor = CreatePaddlePredictor(config);
 
   // prepare inputs data and reference results
@@ -216,11 +218,11 @@
   }
 }
 
-TEST(paddle_inference_api_native_multithreads, image_classification) {
+void MainThreadsImageClassification(bool use_gpu) {
   constexpr int num_jobs = 4;  // each job run 1 batch
   constexpr int batch_size = 1;
   NativeConfig config = GetConfig();
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   config.model_dir = FLAGS_dirname +
"image_classification_resnet.inference.model"; @@ -269,4 +271,29 @@ TEST(paddle_inference_api_native_multithreads, image_classification) { } } +TEST(inference_api_native, word2vec_cpu) { MainWord2Vec(false /*use_gpu*/); } +TEST(inference_api_native, word2vec_cpu_threads) { + MainThreadsWord2Vec(false /*use_gpu*/); +} +TEST(inference_api_native, image_classification_cpu) { + MainThreadsImageClassification(false /*use_gpu*/); +} +TEST(inference_api_native, image_classification_cpu_threads) { + MainThreadsImageClassification(false /*use_gpu*/); +} + +#ifdef PADDLE_WITH_CUDA +TEST(inference_api_native, word2vec_gpu) { MainWord2Vec(true /*use_gpu*/); } +TEST(inference_api_native, word2vec_gpu_threads) { + MainThreadsWord2Vec(true /*use_gpu*/); +} +TEST(inference_api_native, image_classification_gpu) { + MainThreadsImageClassification(true /*use_gpu*/); +} +TEST(inference_api_native, image_classification_gpu_threads) { + MainThreadsImageClassification(true /*use_gpu*/); +} + +#endif + } // namespace paddle -- GitLab