Unverified commit 8b72a1a7 authored by P Peihan and committed by GitHub

add resnet50 trt tests in pr-ci-inference (#34465)

* add resnet50 trt test in pr-ci-inference test
Parent 72a9c8ff
cmake_minimum_required(VERSION 3.0)
project(cpp_inference_demo CXX C)
option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." OFF)
option(USE_TENSORRT "Compile demo with TensorRT." OFF)
option(WITH_GTEST "Compile demo with GTEST" OFF)
if(NOT WITH_STATIC_LIB)
add_definitions("-DPADDLE_WITH_SHARED_LIB")
else()
# PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode.
# Set it to empty in static library mode to avoid compilation issues.
add_definitions("/DPD_INFER_DECL=")
endif()
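# safe_set_static_flag(): on MSVC, replace the dynamic CRT flag (/MD) with the static CRT flag (/MT) in every build configuration.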
macro(safe_set_static_flag)
foreach(flag_var
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
if(${flag_var} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
endif(${flag_var} MATCHES "/MD")
endforeach(flag_var)
endmacro()
if(NOT DEFINED PADDLE_LIB)
message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
endif()
if(NOT DEFINED DEMO_NAME)
message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name")
endif()
include_directories("${PADDLE_LIB}/")
set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}cryptopp/include")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}cryptopp/lib")
link_directories("${PADDLE_LIB}/paddle/lib")
if (WIN32)
add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
option(MSVC_STATIC_CRT "use static C Runtime library by default" ON)
if (MSVC_STATIC_CRT)
if (WITH_MKL)
set(FLAG_OPENMP "/openmp")
endif()
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
safe_set_static_flag()
if (WITH_STATIC_LIB)
add_definitions(-DSTATIC_LIB)
endif()
endif()
else()
if(WITH_MKL)
set(FLAG_OPENMP "-fopenmp")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 ${FLAG_OPENMP}") # C++17 is required by the structured bindings used in test_suite.h
endif()
if(WITH_GPU)
if(NOT WIN32)
set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
else()
if(NOT DEFINED CUDA_LIB)
if(DEFINED ENV{CUDA_PATH})
set(CUDA_LIB "$ENV{CUDA_PATH}\\lib\\x64")
else()
set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\lib\\x64")
endif()
endif()
endif(NOT WIN32)
endif()
if (USE_TENSORRT AND WITH_GPU)
set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library")
if("${TENSORRT_ROOT}" STREQUAL "")
message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ")
endif()
set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include)
set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib)
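# NV_TENSORRT_MAJOR lives in NvInfer.h in older TensorRT releases and in NvInferVersion.h in newer ones, so try NvInfer.h first and fall back if the match is empty.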
file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
"${TENSORRT_VERSION_FILE_CONTENTS}")
if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS)
string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
"${TENSORRT_VERSION_FILE_CONTENTS}")
endif()
if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
message(SEND_ERROR "Failed to detect TensorRT version.")
endif()
string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
"Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
include_directories("${TENSORRT_INCLUDE_DIR}")
link_directories("${TENSORRT_LIB_DIR}")
endif()
if(WITH_MKL)
set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml")
include_directories("${MATH_LIB_PATH}/include")
if(WIN32)
set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX}
${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
set(MKLDNN_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mkldnn")
if(EXISTS ${MKLDNN_PATH})
include_directories("${MKLDNN_PATH}/include")
if(WIN32)
set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
else(WIN32)
set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
endif(WIN32)
endif()
else()
set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas")
include_directories("${OPENBLAS_LIB_PATH}/include/openblas")
if(WIN32)
set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
endif()
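# Pick the Paddle inference library to link: static archive, Windows import library, or shared object.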
if(WITH_STATIC_LIB)
set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
if(WIN32)
set(DEPS ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
endif()
if (NOT WIN32)
set(EXTERNAL_LIB "-lrt -ldl -lpthread")
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags protobuf xxhash cryptopp
${EXTERNAL_LIB})
else()
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags_static libprotobuf xxhash cryptopp-static ${EXTERNAL_LIB})
set(DEPS ${DEPS} shlwapi.lib)
endif(NOT WIN32)
if(WITH_GPU)
if(NOT WIN32)
if (USE_TENSORRT)
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
else()
if(USE_TENSORRT)
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
endif()
set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} )
endif()
endif()
if(WITH_GTEST)
include(ExternalProject)
include(external-cmake/gtest-cpp.cmake)
endif()
add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
target_link_libraries(${DEMO_NAME} ${DEPS})
if(WITH_GTEST)
include(GNUInstallDirs)
include_directories(${GTEST_INSTALL_DIR}/include)
add_dependencies(${DEMO_NAME} thirdparty_gtest)
target_link_libraries(${DEMO_NAME} ${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
if(WIN32)
if(USE_TENSORRT)
add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}
${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
)
if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_SHARED_LIBRARY_SUFFIX}
${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
endif()
endif()
if(WITH_MKL)
add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release
COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release
COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release
)
else()
add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release
)
endif()
if(NOT WITH_STATIC_LIB)
add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
)
endif()
endif()
find_package(Git REQUIRED)
message("${CMAKE_BUILD_TYPE}")
SET(GTEST_PREFIX_DIR ${CMAKE_CURRENT_BINARY_DIR}/gtest)
SET(GTEST_SOURCE_DIR ${CMAKE_CURRENT_BINARY_DIR}/gtest/src/extern_gtest)
SET(GTEST_INSTALL_DIR ${CMAKE_CURRENT_BINARY_DIR}/install/gtest)
SET(GTEST_INCLUDE_DIR "${GTEST_INSTALL_DIR}/include" CACHE PATH "gtest include directory." FORCE)
set(GTEST_REPOSITORY https://github.com/google/googletest.git)
set(GTEST_TAG release-1.8.1)
INCLUDE_DIRECTORIES(${GTEST_INCLUDE_DIR})
IF(WIN32)
set(GTEST_LIBRARIES
"${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/gtest.lib" CACHE FILEPATH "gtest libraries." FORCE)
set(GTEST_MAIN_LIBRARIES
"${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/gtest_main.lib" CACHE FILEPATH "gtest main libraries." FORCE)
ELSE()
set(GTEST_LIBRARIES
"${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest.a" CACHE FILEPATH "gtest libraries." FORCE)
set(GTEST_MAIN_LIBRARIES
"${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest_main.a" CACHE FILEPATH "gtest main libraries." FORCE)
ENDIF(WIN32)
ExternalProject_Add(
extern_gtest
PREFIX gtest
GIT_REPOSITORY ${GTEST_REPOSITORY}
GIT_TAG ${GTEST_TAG}
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=Release
BUILD_BYPRODUCTS ${GTEST_LIBRARIES}
BUILD_BYPRODUCTS ${GTEST_MAIN_LIBRARIES}
)
ADD_LIBRARY(thirdparty_gtest STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET thirdparty_gtest PROPERTY IMPORTED_LOCATION ${GTEST_LIBRARIES})
ADD_DEPENDENCIES(thirdparty_gtest extern_gtest)
ADD_LIBRARY(thirdparty_gtest_main STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET thirdparty_gtest_main PROPERTY IMPORTED_LOCATION ${GTEST_MAIN_LIBRARIES})
ADD_DEPENDENCIES(thirdparty_gtest_main extern_gtest)
#!/bin/bash
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x
PADDLE_ROOT=$1
TURN_ON_MKL=$2 # use MKL or Openblas
TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
DATA_DIR=$4 # dataset
TENSORRT_ROOT_DIR=$5 # TensorRT ROOT dir, default to /usr/local/TensorRT
MSVC_STATIC_CRT=$6
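# Example invocation (illustrative paths):
#   bash run.sh /workspace/Paddle ON ON ./infer_ut_data /usr/local/TensorRT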
inference_install_dir=${PADDLE_ROOT}/build/paddle_inference_install_dir
EXIT_CODE=0 # init default exit code
cd `dirname $0`
current_dir=`pwd`
if [ "$TURN_ON_MKL" == "ON" ]; then
# Export LD_LIBRARY_PATH yourself if you relocate the install path
MKL_LIB=${inference_install_dir}/third_party/install/mklml/lib
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB}
fi
if [ "$TEST_GPU_CPU" == "ON" ]; then
use_gpu_list='true false'
else
use_gpu_list='false'
fi
USE_TENSORRT=OFF
if [ -d "$TENSORRT_ROOT_DIR" ]; then
USE_TENSORRT=ON
fi
# download test model data
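# usage: download <url_prefix> <model_name>; fetches <model_name>.tgz into ./<model_name>/ and extracts it, skipping the download if the archive is already present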
function download() {
url_prefix=$1
model_name=$2
mkdir -p $model_name
cd $model_name
if [[ -e "${model_name}.tgz" ]]; then
echo "${model_name}.tgz has been downloaded."
else
wget -q --no-proxy ${url_prefix}/${model_name}.tgz
tar xzf *.tgz
fi
cd ..
}
mkdir -p $DATA_DIR
cd $DATA_DIR
download_list='resnet50'
for model_name in $download_list; do
url_prefix="https://paddle-inference-dist.bj.bcebos.com/Paddle-Inference-Demo"
download $url_prefix $model_name
done
# compile and run test
cd $current_dir
mkdir -p build
cd build
rm -rf *
# ---------tensorrt resnet50 on linux---------
if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
rm -rf *
cmake .. -DPADDLE_LIB=${inference_install_dir} \
-DWITH_MKL=$TURN_ON_MKL \
-DDEMO_NAME=test_resnet50 \
-DWITH_GPU=$TEST_GPU_CPU \
-DWITH_STATIC_LIB=OFF \
-DUSE_TENSORRT=$USE_TENSORRT \
-DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
-DWITH_GTEST=ON
make -j$(nproc)
./test_resnet50 \
--modeldir=$DATA_DIR/resnet50/resnet50 \
--gtest_output=xml:test_resnet50.xml
if [ $? -ne 0 ]; then
echo "test_resnet50 runs failed" >> ${current_dir}/build/test_summary.txt
EXIT_CODE=1
fi
fi
if [[ -f ${current_dir}/build/test_summary.txt ]]; then
echo "=====================test summary======================"
cat ${current_dir}/build/test_summary.txt
echo "========================================================"
fi
echo "infer_ut script finished"
exit ${EXIT_CODE}
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "test_suite.h" // NOLINT
DEFINE_string(modeldir, "", "Directory of the inference model.");
namespace paddle_infer {
paddle::test::Record PrepareInput(int batch_size) {
// init input data
int channel = 3;
int width = 224;
int height = 224;
paddle::test::Record image_Record;
int input_num = batch_size * channel * width * height;
std::vector<float> input_data(input_num, 1);
image_Record.data = input_data;
image_Record.shape = std::vector<int>{batch_size, channel, width, height};
image_Record.type = paddle::PaddleDType::FLOAT32;
return image_Record;
}
TEST(test_resnet50, analysis_gpu_bz1) {
// init input data
std::map<std::string, paddle::test::Record> my_input_data_map;
my_input_data_map["inputs"] = PrepareInput(1);
// init output data
std::map<std::string, paddle::test::Record> infer_output_data,
truth_output_data;
// prepare ground truth config
paddle_infer::Config config, config_no_ir;
config_no_ir.SetModel(FLAGS_modeldir + "/inference.pdmodel",
FLAGS_modeldir + "/inference.pdiparams");
config_no_ir.SwitchIrOptim(false);
// prepare inference config
config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
FLAGS_modeldir + "/inference.pdiparams");
// get ground truth by disabling IR optimization
paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &my_input_data_map,
&truth_output_data, 1);
// get infer results
paddle_infer::services::PredictorPool pred_pool(config, 1);
SingleThreadPrediction(pred_pool.Retrive(0), &my_input_data_map,
&infer_output_data);
// check outputs
CompareRecord(&truth_output_data, &infer_output_data);
std::cout << "finish test" << std::endl;
}
TEST(test_resnet50, trt_fp32_bz2) {
// init input data
std::map<std::string, paddle::test::Record> my_input_data_map;
my_input_data_map["inputs"] = PrepareInput(2);
// init output data
std::map<std::string, paddle::test::Record> infer_output_data,
truth_output_data;
// prepare ground truth config
paddle_infer::Config config, config_no_ir;
config_no_ir.SetModel(FLAGS_modeldir + "/inference.pdmodel",
FLAGS_modeldir + "/inference.pdiparams");
config_no_ir.SwitchIrOptim(false);
// prepare inference config
config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
FLAGS_modeldir + "/inference.pdiparams");
config.EnableUseGpu(100, 0);
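// EnableTensorRtEngine(workspace_size, max_batch_size, min_subgraph_size, precision, use_static, use_calib_mode)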
config.EnableTensorRtEngine(
1 << 20, 2, 3, paddle_infer::PrecisionType::kFloat32, false, false);
// get ground truth by disabling IR optimization
paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &my_input_data_map,
&truth_output_data, 1);
// get infer results
paddle_infer::services::PredictorPool pred_pool(config, 1);
SingleThreadPrediction(pred_pool.Retrive(0), &my_input_data_map,
&infer_output_data);
// check outputs
CompareRecord(&truth_output_data, &infer_output_data);
std::cout << "finish test" << std::endl;
}
TEST(test_resnet50, serial_diff_batch_trt_fp32) {
int max_batch_size = 5;
// prepare ground truth config
paddle_infer::Config config, config_no_ir;
config_no_ir.SetModel(FLAGS_modeldir + "/inference.pdmodel",
FLAGS_modeldir + "/inference.pdiparams");
config_no_ir.SwitchIrOptim(false);
paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
// prepare inference config
config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
FLAGS_modeldir + "/inference.pdiparams");
config.EnableUseGpu(100, 0);
config.EnableTensorRtEngine(1 << 20, max_batch_size, 3,
paddle_infer::PrecisionType::kFloat32, false,
false);
paddle_infer::services::PredictorPool pred_pool(config, 1);
for (int i = 1; i < max_batch_size; i++) {
// init input data
std::map<std::string, paddle::test::Record> my_input_data_map;
my_input_data_map["inputs"] = PrepareInput(i);
// init output data
std::map<std::string, paddle::test::Record> infer_output_data,
truth_output_data;
// get ground truth by disabling IR optimization
SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &my_input_data_map,
&truth_output_data, 1);
// get infer results
SingleThreadPrediction(pred_pool.Retrive(0), &my_input_data_map,
&infer_output_data);
// check outputs
CompareRecord(&truth_output_data, &infer_output_data);
}
std::cout << "finish test" << std::endl;
}
} // namespace paddle_infer
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
::google::ParseCommandLineFlags(&argc, &argv, true);
return RUN_ALL_TESTS();
}
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <math.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>
#include "gflags/gflags.h"
#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/include/paddle_inference_api.h"
namespace paddle {
namespace test {
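// Record holds one tensor as flat float data plus its shape and original dtype.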
class Record {
public:
std::vector<float> data;
std::vector<int32_t> shape;
paddle::PaddleDType type;
};
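// Run repeat_times forward passes of the predictor on input_data_map and copy each output tensor into output_data_map (integer outputs are converted to float).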
void SingleThreadPrediction(paddle_infer::Predictor *predictor,
std::map<std::string, Record> *input_data_map,
std::map<std::string, Record> *output_data_map,
int repeat_times = 2) {
// prepare input tensor
auto input_names = predictor->GetInputNames();
for (const auto & [ key, value ] : *input_data_map) {
auto input_tensor = predictor->GetInputHandle(key);
input_tensor->Reshape(value.shape);
input_tensor->CopyFromCpu(value.data.data());
}
// inference
for (int i = 0; i < repeat_times; ++i) {
predictor->Run();
}
// get output data to Record
auto output_names = predictor->GetOutputNames();
for (auto &output_name : output_names) {
Record output_Record;
auto output_tensor = predictor->GetOutputHandle(output_name);
std::vector<int> output_shape = output_tensor->shape();
int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
std::multiplies<int>());
switch (output_tensor->type()) {
case paddle::PaddleDType::INT64: {
std::cout << "int64" << std::endl;
std::vector<int64_t> out_data;
output_Record.type = paddle::PaddleDType::INT64;
out_data.resize(out_num);
output_tensor->CopyToCpu(out_data.data());
output_Record.shape = output_shape;
std::vector<float> floatVec(out_data.begin(), out_data.end());
output_Record.data = floatVec;
(*output_data_map)[output_name] = output_Record;
break;
}
case paddle::PaddleDType::FLOAT32: {
std::cout << "float32" << std::endl;
std::vector<float> out_data;
output_Record.type = paddle::PaddleDType::FLOAT32;
out_data.resize(out_num);
output_tensor->CopyToCpu(out_data.data());
output_Record.shape = output_shape;
output_Record.data = out_data;
(*output_data_map)[output_name] = output_Record;
break;
}
case paddle::PaddleDType::INT32: {
std::cout << "int32" << std::endl;
std::vector<int32_t> out_data;
output_Record.type = paddle::PaddleDType::INT32;
out_data.resize(out_num);
output_tensor->CopyToCpu(out_data.data());
output_Record.shape = output_shape;
std::vector<float> floatVec(out_data.begin(), out_data.end());
output_Record.data = floatVec;
(*output_data_map)[output_name] = output_Record;
break;
}
}
}
}
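// Compare each inferred output with the ground-truth output element-wise; fail if any absolute difference exceeds epislon.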
void CompareRecord(std::map<std::string, Record> *truth_output_data,
std::map<std::string, Record> *infer_output_data,
float epislon = 1e-5) {
for (const auto & [ key, value ] : *infer_output_data) {
auto truth_record = (*truth_output_data)[key];
LOG(INFO) << "output name: " << key;
size_t numel = value.data.size();
EXPECT_EQ(value.data.size(), truth_record.data.size());
for (size_t i = 0; i < numel; ++i) {
CHECK_LT(fabs(value.data.data()[i] - truth_record.data.data()[i]),
epislon);
}
}
}
} // namespace test
} // namespace paddle
@@ -1980,7 +1980,7 @@ EOF
fi
startTime_s=`date +%s`
set +e
cmake .. -DWITH_DISTRIBUTE=OFF -DON_INFER=ON -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME:-Auto};build_error=$?
cmake .. -DWITH_DISTRIBUTE=OFF -DON_INFER=ON -DWITH_TENSORRT=ON -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME:-Auto};build_error=$?
# reset ccache zero stats for collect PR's actual hit rate
ccache -z
@@ -2026,12 +2026,16 @@ EOF
./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF} ${INFERENCE_DEMO_INSTALL_DIR} \
${TENSORRT_INCLUDE_DIR:-/usr/local/TensorRT/include} \
${TENSORRT_LIB_DIR:-/usr/local/TensorRT/lib}
EXIT_CODE=$?
DEMO_EXIT_CODE=$?
./clean.sh
cd ${PADDLE_ROOT}/paddle/fluid/inference/tests/infer_ut
./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF} ${INFERENCE_DEMO_INSTALL_DIR} \
${TENSORRT_ROOT_DIR:-/usr}
TEST_EXIT_CODE=$?
fluid_endTime_s=`date +%s`
echo "test_fluid_lib Total Time: $[ $fluid_endTime_s - $fluid_startTime_s ]s"
echo "ipipe_log_param_Test_Fluid_Lib_Total_Time: $[ $fluid_endTime_s - $fluid_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt
./clean.sh
if [[ "$EXIT_CODE" != "0" ]]; then
if [[ "$DEMO_EXIT_CODE" != "0" || "$TEST_EXIT_CODE" != "0" ]]; then
exit 8;
fi
}