未验证 提交 058c52b6 编写于 作者: S Sing_chan 提交者: GitHub

[windows CI]open inference_ut in windows-inference pipeline (#43446)

* open inference_ut;test=windows_ci_inference

* inference_ut need onnx;test=windows_ci_inference

* disable trt_split_converter_test; use a higher parallelism level

* too high a parallelism level will cause unit-test (ut) timeouts
上级 c6421019
......@@ -680,7 +680,12 @@ pip install requests
set PATH=%THIRD_PARTY_PATH:/=\%\install\openblas\lib;%THIRD_PARTY_PATH:/=\%\install\openblas\bin;^
%THIRD_PARTY_PATH:/=\%\install\zlib\bin;%THIRD_PARTY_PATH:/=\%\install\mklml\lib;^
%THIRD_PARTY_PATH:/=\%\install\mkldnn\bin;%THIRD_PARTY_PATH:/=\%\install\warpctc\bin;%PATH%
%THIRD_PARTY_PATH:/=\%\install\mkldnn\bin;%THIRD_PARTY_PATH:/=\%\install\warpctc\bin;^
%THIRD_PARTY_PATH:/=\%\install\onnxruntime\lib;%THIRD_PARTY_PATH:/=\%\install\paddle2onnx\lib;^
%work_dir%\%BUILD_DIR%\paddle\fluid\inference;%PATH%
REM TODO: make ut find .dll in install\onnxruntime\lib
xcopy %THIRD_PARTY_PATH:/=\%\install\onnxruntime\lib\onnxruntime.dll %work_dir%\%BUILD_DIR%\paddle\fluid\inference\tests\api\ /Y
if "%WITH_GPU%"=="ON" (
call:parallel_test_base_gpu
......
......@@ -85,6 +85,7 @@ disable_win_inference_api_test="^trt_quant_int8_yolov3_r50_test$|\
^lite_resnet50_test$|\
^test_trt_dynamic_shape_transformer_prune$|\
^lite_mul_model_test$|\
^trt_split_converter_test$|\
^paddle_infer_api_copy_tensor_tester$"
......@@ -191,10 +192,6 @@ if [ -f "$PADDLE_ROOT/added_ut" ];then
echo "========================================"
exit 8;
fi
if nvcc --version | grep 11.2; then
echo "Only test added_ut temporarily when running in CI-Windows-inference of CUDA 11.2."
exit 0;
fi
fi
set -e
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册