From 4337009b9236d93dcd896fb60d27994b1a43a68a Mon Sep 17 00:00:00 2001
From: Zhaolong Xing
Date: Wed, 29 May 2019 09:39:45 +0800
Subject: [PATCH] fix trt ci timeout error (#17701)

test=develop
---
 .../fluid/inference/tests/api/CMakeLists.txt  |  8 ++-
 .../fluid/inference/tests/api/tester_helper.h |  2 +-
 .../inference/tests/api/trt_mobilenet_test.cc | 49 +++++++++++++++++++
 .../inference/tests/api/trt_resnet50_test.cc  | 30 ++++++++++++
 .../inference/tests/api/trt_resnext_test.cc   | 30 ++++++++++++
 ...trt_models_tester.cc => trt_test_helper.h} | 43 +++--------------
 6 files changed, 123 insertions(+), 39 deletions(-)
 create mode 100644 paddle/fluid/inference/tests/api/trt_mobilenet_test.cc
 create mode 100644 paddle/fluid/inference/tests/api/trt_resnet50_test.cc
 create mode 100644 paddle/fluid/inference/tests/api/trt_resnext_test.cc
 rename paddle/fluid/inference/tests/api/{trt_models_tester.cc => trt_test_helper.h} (81%)

diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index f96c920d2..548ef3825 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -243,7 +243,13 @@ if(WITH_GPU AND TENSORRT_FOUND)
   if (NOT EXISTS ${TRT_MODEL_INSTALL_DIR})
     inference_download_and_uncompress(${TRT_MODEL_INSTALL_DIR} ${INFERENCE_URL}/tensorrt_test "trt_test_models.tar.gz")
   endif()
-  inference_analysis_test(test_trt_models SRCS trt_models_tester.cc
+  inference_analysis_test(trt_mobilenet_test SRCS trt_mobilenet_test.cc
+          EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
+          ARGS --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_test_models)
+  inference_analysis_test(trt_resnet50_test SRCS trt_resnet50_test.cc
+          EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
+          ARGS --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_test_models)
+  inference_analysis_test(trt_resnext_test SRCS trt_resnext_test.cc
           EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
           ARGS --infer_model=${TRT_MODEL_INSTALL_DIR}/trt_test_models)
 endif()
diff --git
a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h
index a50810948..6dda9ed0e 100644
--- a/paddle/fluid/inference/tests/api/tester_helper.h
+++ b/paddle/fluid/inference/tests/api/tester_helper.h
@@ -148,7 +148,7 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
     case PaddleDType::INT64: {
       int64_t *pdata = static_cast<int64_t *>(out.data.data());
       int64_t *pdata_ref = ref_out.data<int64_t>(&place, &ref_size);
-      EXPECT_EQ(size, ref_size);
+      EXPECT_EQ(size, static_cast<size_t>(ref_size));
       for (size_t j = 0; j < size; ++j) {
         EXPECT_EQ(pdata_ref[j], pdata[j]);
       }
diff --git a/paddle/fluid/inference/tests/api/trt_mobilenet_test.cc b/paddle/fluid/inference/tests/api/trt_mobilenet_test.cc
new file mode 100644
index 000000000..14539a9d4
--- /dev/null
+++ b/paddle/fluid/inference/tests/api/trt_mobilenet_test.cc
@@ -0,0 +1,49 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <gflags/gflags.h>
+#include <glog/logging.h>
+#include <gtest/gtest.h>
+
+#include "paddle/fluid/inference/tests/api/trt_test_helper.h"
+
+namespace paddle {
+namespace inference {
+
+TEST(TensorRT_mobilenet, compare) {
+  std::string model_dir = FLAGS_infer_model + "/mobilenet";
+  compare(model_dir, /* use_tensorrt */ true);
+  // Open it when need.
+  // profile(model_dir, /* use_analysis */ true, FLAGS_use_tensorrt);
+}
+
+TEST(AnalysisPredictor, use_gpu) {
+  std::string model_dir = FLAGS_infer_model + "/" + "mobilenet";
+  AnalysisConfig config;
+  config.EnableUseGpu(100, 0);
+  config.SetModel(model_dir);
+  config.pass_builder()->TurnOnDebug();
+
+  std::vector<std::vector<PaddleTensor>> inputs_all;
+  auto predictor = CreatePaddlePredictor(config);
+  SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
+
+  std::vector<PaddleTensor> outputs;
+  for (auto& input : inputs_all) {
+    ASSERT_TRUE(predictor->Run(input, &outputs));
+  }
+}
+
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/tests/api/trt_resnet50_test.cc b/paddle/fluid/inference/tests/api/trt_resnet50_test.cc
new file mode 100644
index 000000000..7dfcbb0d0
--- /dev/null
+++ b/paddle/fluid/inference/tests/api/trt_resnet50_test.cc
@@ -0,0 +1,30 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <gflags/gflags.h>
+#include <glog/logging.h>
+#include <gtest/gtest.h>
+
+#include "paddle/fluid/inference/tests/api/trt_test_helper.h"
+
+namespace paddle {
+namespace inference {
+
+TEST(resnet50, compare_continuous_input) {
+  std::string model_dir = FLAGS_infer_model + "/resnet50";
+  compare_continuous_input(model_dir, true);
+}
+
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/tests/api/trt_resnext_test.cc b/paddle/fluid/inference/tests/api/trt_resnext_test.cc
new file mode 100644
index 000000000..588b5bffd
--- /dev/null
+++ b/paddle/fluid/inference/tests/api/trt_resnext_test.cc
@@ -0,0 +1,30 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <gflags/gflags.h>
+#include <glog/logging.h>
+#include <gtest/gtest.h>
+
+#include "paddle/fluid/inference/tests/api/trt_test_helper.h"
+
+namespace paddle {
+namespace inference {
+
+TEST(TensorRT_resnext50, compare) {
+  std::string model_dir = FLAGS_infer_model + "/resnext50";
+  compare(model_dir, /* use_tensorrt */ true);
+}
+
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/tests/api/trt_models_tester.cc b/paddle/fluid/inference/tests/api/trt_test_helper.h
similarity index 81%
rename from paddle/fluid/inference/tests/api/trt_models_tester.cc
rename to paddle/fluid/inference/tests/api/trt_test_helper.h
index ec10e36c3..0233cad0a 100644
--- a/paddle/fluid/inference/tests/api/trt_models_tester.cc
+++ b/paddle/fluid/inference/tests/api/trt_test_helper.h
@@ -11,10 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#pragma once
+#include <string>
+#include <vector>
 
-#include <gflags/gflags.h>
-#include <glog/logging.h>
-#include <gtest/gtest.h>
+#include "gflags/gflags.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
 
 #include "paddle/fluid/inference/tests/api/tester_helper.h"
 
@@ -130,39 +133,5 @@ void compare_continuous_input(std::string model_dir, bool use_tensorrt) {
   }
 }
 
-TEST(TensorRT_mobilenet, compare) {
-  std::string model_dir = FLAGS_infer_model + "/mobilenet";
-  compare(model_dir, /* use_tensorrt */ true);
-  // Open it when need.
-  // profile(model_dir, /* use_analysis */ true, FLAGS_use_tensorrt);
-}
-
-TEST(resnet50, compare_continuous_input) {
-  std::string model_dir = FLAGS_infer_model + "/resnet50";
-  compare_continuous_input(model_dir, true);
-}
-
-TEST(TensorRT_resnext50, compare) {
-  std::string model_dir = FLAGS_infer_model + "/resnext50";
-  compare(model_dir, /* use_tensorrt */ true);
-}
-
-TEST(AnalysisPredictor, use_gpu) {
-  std::string model_dir = FLAGS_infer_model + "/" + "mobilenet";
-  AnalysisConfig config;
-  config.EnableUseGpu(100, 0);
-  config.SetModel(model_dir);
-  config.pass_builder()->TurnOnDebug();
-
-  std::vector<std::vector<PaddleTensor>> inputs_all;
-  auto predictor = CreatePaddlePredictor(config);
-  SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
-
-  std::vector<PaddleTensor> outputs;
-  for (auto& input : inputs_all) {
-    ASSERT_TRUE(predictor->Run(input, &outputs));
-  }
-}
-
 }  // namespace inference
 }  // namespace paddle
-- 
GitLab