未验证 提交 26213a77 编写于 作者: T tianshuo78520a 提交者: GitHub

Fix Inference CI CPU/GPU (#34931)

* notest;test=gpu-inference

* notest;test=gpu-inference

* notest;test=gpu-inference

* notest;test=gpu-inference

* fix error

* notest;test=gpu-inference

* notest;test=gpu-inference

* notest;test=gpu-inference

* test=gpu-inference
上级 c4e05e1c
@@ -21,7 +21,7 @@ if [ ! -d mobilenetv1 ]; then
fi
# 2. set LD_LIBRARY_PATH
# NOTE(review): the two export lines below appear to be the before/after pair of
# a diff hunk whose +/- markers were lost in extraction (the second line adds the
# MKLML and MKL-DNN third-party lib dirs needed by the C inference library).
# Run as plain shell, both lines execute and simply append paths cumulatively —
# confirm against the original commit before treating this as the final script.
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$PWD/paddle_inference_c/paddle/lib
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/paddle_inference_c/third_party/install/mklml/lib/:$PWD/paddle_inference_c/third_party/install/mkldnn/lib/:$PWD/paddle_inference_c/paddle/lib/
# 3. go test
# Run every Go test in the current module tree with verbose output.
go test -v ./...
@@ -2422,6 +2422,11 @@ function main() {
python ${PADDLE_ROOT}/tools/remove_grad_op_and_kernel.py
gen_fluid_lib ${parallel_number}
;;
# New CI dispatch branch (added by this commit, selected via the
# "test=gpu-inference" commit tag): run the fluid inference test suite, then
# the Go inference API test, then the unittest-change approval check.
# NOTE(review): test_fluid_lib, test_go_inference_api and
# check_approvals_of_unittest are functions defined elsewhere in
# paddle_build.sh; the literal argument 3 presumably selects which approval
# rule set to apply — confirm against the helper's definition.
gpu_inference)
test_fluid_lib
test_go_inference_api
check_approvals_of_unittest 3
;;
test_train)
gen_fluid_lib ${parallel_number}
test_fluid_lib_train
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册