Commit 746a62eb authored by tensor-tang

add gpu tests

Parent 35e820dc
@@ -66,12 +66,12 @@ void Main(bool use_gpu) {
   }
 }

-void MainThreads(int num_threads) {
+void MainThreads(int num_threads, bool use_gpu) {
   // Multi-threads only support on CPU
   // 0. Create PaddlePredictor with a config.
   NativeConfig config;
   config.model_dir = FLAGS_dirname + "word2vec.inference.model";
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   auto main_predictor =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
@@ -113,11 +113,13 @@ void MainThreads(int num_threads) {
   }
 }

 TEST(demo, word2vec_cpu) { Main(false /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_cpu_1) { MainThreads(1); }
-TEST(demo_multi_threads, word2vec_cpu_4) { MainThreads(4); }
+TEST(demo_multi_threads, word2vec_cpu_1) { MainThreads(1, false /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_cpu_4) { MainThreads(4, false /*use_gpu*/); }
 #ifdef PADDLE_WITH_CUDA
 TEST(demo, word2vec_gpu) { Main(true /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_gpu_1) { MainThreads(1, true /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_gpu_4) { MainThreads(4, true /*use_gpu*/); }
 #endif

 }  // namespace demo
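The first file above is the word2vec demo: the commit threads `use_gpu` through its `MainThreads` helper so the same multi-threaded body can run on either device. For readers without the full file on screen, here is a minimal sketch of the pattern that helper exercises, assuming the contrib API of this era (`Clone()` per worker thread, a plain `{data, length}` `PaddleBuf`, and dummy word ids); it is an illustration, not the exact file contents.

```cpp
#include <thread>
#include <vector>

#include "paddle/contrib/inference/paddle_inference_api.h"

void RunDemoThreads(int num_threads, bool use_gpu) {
  paddle::NativeConfig config;
  config.model_dir = "word2vec.inference.model";  // FLAGS_dirname + ... in the real test
  config.use_gpu = use_gpu;  // must be set before the predictor is created
  auto main_predictor = paddle::CreatePaddlePredictor<
      paddle::NativeConfig, paddle::PaddleEngineKind::kNative>(config);

  std::vector<std::thread> workers;
  for (int tid = 0; tid < num_threads; ++tid) {
    workers.emplace_back([&main_predictor] {
      // Each thread runs inference on its own clone of the shared predictor.
      auto predictor = main_predictor->Clone();
      int64_t ids[4] = {1, 2, 3, 4};  // dummy word ids
      paddle::PaddleTensor input;
      input.shape = std::vector<int>({4, 1});
      input.data.data = ids;            // assumed plain-struct PaddleBuf of this era
      input.data.length = sizeof(ids);
      input.dtype = paddle::PaddleDType::INT64;
      std::vector<paddle::PaddleTensor> outputs;
      predictor->Run({input}, &outputs);
    });
  }
  for (auto& w : workers) w.join();
}
```

The second file in the commit, the native API implementation tester, gets the same treatment: each `TEST` body becomes a `Main*` helper parameterized on `use_gpu`.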
@@ -56,9 +56,10 @@ NativeConfig GetConfig() {
   return config;
 }

-TEST(paddle_inference_api_impl, word2vec) {
+void MainWord2Vec(bool use_gpu) {
   NativeConfig config = GetConfig();
+  config.use_gpu = use_gpu;
   auto predictor = CreatePaddlePredictor<NativeConfig>(config);

   framework::LoDTensor first_word, second_word, third_word, fourth_word;
   framework::LoD lod{{0, 1}};
@@ -106,11 +107,12 @@ TEST(paddle_inference_api_impl, word2vec) {
   free(outputs[0].data.data);
 }

-TEST(paddle_inference_api_impl, image_classification) {
+void MainImageClassification(bool use_gpu) {
   int batch_size = 2;
   bool use_mkldnn = false;
   bool repeat = false;
   NativeConfig config = GetConfig();
+  config.use_gpu = use_gpu;
   config.model_dir =
       FLAGS_dirname + "image_classification_resnet.inference.model";
@@ -155,9 +157,9 @@ TEST(paddle_inference_api_impl, image_classification) {
   free(data);
 }

-TEST(paddle_inference_api_native_multithreads, word2vec) {
+void MainThreadsWord2Vec(bool use_gpu) {
   NativeConfig config = GetConfig();
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   auto main_predictor = CreatePaddlePredictor<NativeConfig>(config);

   // prepare inputs data and reference results
@@ -216,11 +218,11 @@ TEST(paddle_inference_api_native_multithreads, word2vec) {
   }
 }

-TEST(paddle_inference_api_native_multithreads, image_classification) {
+void MainThreadsImageClassification(bool use_gpu) {
   constexpr int num_jobs = 4;  // each job runs 1 batch
   constexpr int batch_size = 1;
   NativeConfig config = GetConfig();
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   config.model_dir =
       FLAGS_dirname + "image_classification_resnet.inference.model";
@@ -269,4 +271,29 @@ TEST(paddle_inference_api_native_multithreads, image_classification) {
   }
 }

+TEST(inference_api_native, word2vec_cpu) { MainWord2Vec(false /*use_gpu*/); }
+TEST(inference_api_native, word2vec_cpu_threads) {
+  MainThreadsWord2Vec(false /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_cpu) {
+  MainImageClassification(false /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_cpu_threads) {
+  MainThreadsImageClassification(false /*use_gpu*/);
+}
+
+#ifdef PADDLE_WITH_CUDA
+TEST(inference_api_native, word2vec_gpu) { MainWord2Vec(true /*use_gpu*/); }
+TEST(inference_api_native, word2vec_gpu_threads) {
+  MainThreadsWord2Vec(true /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_gpu) {
+  MainImageClassification(true /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_gpu_threads) {
+  MainThreadsImageClassification(true /*use_gpu*/);
+}
+#endif
+
 }  // namespace paddle
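Taken together, the second file converts four `TEST` bodies into `Main*` helpers parameterized on `use_gpu`, registers the CPU variants unconditionally, and guards the GPU variants with `PADDLE_WITH_CUDA` so a CPU-only build still compiles and runs the whole suite. Below is a condensed, self-contained sketch of that registration pattern using plain GoogleTest; `RunInference` is a hypothetical stand-in for the real helpers, not code from this commit.

```cpp
#include <gtest/gtest.h>

namespace {

// Stand-in for the real MainWord2Vec / MainImageClassification helpers:
// the shared test body takes the device choice as a parameter.
void RunInference(bool use_gpu) {
  // ... build a NativeConfig with config.use_gpu = use_gpu, create the
  // predictor, feed inputs, and check outputs (elided in this sketch).
  SUCCEED() << (use_gpu ? "gpu path" : "cpu path");
}

}  // namespace

// CPU variants are always registered.
TEST(inference_api_native, demo_cpu) { RunInference(false /*use_gpu*/); }

// GPU variants only exist when the binary was built with CUDA support,
// so a CPU-only build still links and runs the full suite.
#ifdef PADDLE_WITH_CUDA
TEST(inference_api_native, demo_gpu) { RunInference(true /*use_gpu*/); }
#endif
```

One consequence of this design is that each helper is exercised with both device settings from a single compiled test binary, with the preprocessor (rather than runtime skips) deciding whether the GPU cases exist at all.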