#!/bin/bash

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

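# This script builds and runs the Paddle Inference C++ demos
# (simple_on_word2vec, vis_demo, trt_mobilenet_demo, onnxruntime_mobilenet_demo)
# against a local paddle_inference install, downloading the required models first.
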
set -x
PADDLE_ROOT=$1
TURN_ON_MKL=$2 # use MKL or OpenBLAS
TEST_GPU_CPU=$3 # test both GPU/CPU modes or CPU-only
DATA_DIR=$4 # dataset directory
USE_TENSORRT=$5
TENSORRT_ROOT_DIR=$6 # TensorRT root dir, default to /usr
WITH_ONNXRUNTIME=$7
MSVC_STATIC_CRT=$8
CUDA_LIB=$9/lib/x64 # CUDA library directory under the CUDA root ($9)
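
# Example invocation (all paths are illustrative):
#   bash run.sh /path/to/Paddle ON ON /path/to/data ON /usr ON OFF /usr/local/cuda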
inference_install_dir=${PADDLE_ROOT}/build/paddle_inference_install_dir
WIN_DETECT=$(uname | grep "Win") # non-empty when running on Windows

cd `dirname $0`
current_dir=`pwd`
if [ "$TURN_ON_MKL" == "ON" ]; then
  # If you relocate the install path, export LD_LIBRARY_PATH yourself.
  MKL_LIB=${inference_install_dir}/third_party/install/mklml/lib
  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB}
fi
if [ "$TEST_GPU_CPU" == "ON" ]; then
  use_gpu_list='true false'
else
  use_gpu_list='false'
fi

mkdir -p $DATA_DIR
cd $DATA_DIR

if [ "$WITH_ONNXRUNTIME" == "ON" ]; then
  ONNXRUNTIME_LIB=${inference_install_dir}/third_party/install/onnxruntime/lib
  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${ONNXRUNTIME_LIB}
  PADDLE2ONNX_LIB=${inference_install_dir}/third_party/install/paddle2onnx/lib
  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${PADDLE2ONNX_LIB}
  # download the MobileNetV2 model; remove any stale archive so a fresh copy is fetched
  mkdir -p MobileNetV2
  cd MobileNetV2
  if [[ -e "MobileNetV2.inference.model.tar.gz" ]]; then
    rm -rf MobileNetV2.inference.model.tar.gz
  fi
  if [ "$WIN_DETECT" != "" ]; then
    wget -q -Y off http://paddle-inference-dist.bj.bcebos.com/MobileNetV2.inference.model.tar.gz
  else
    wget -q --no-proxy http://paddle-inference-dist.bj.bcebos.com/MobileNetV2.inference.model.tar.gz
  fi
  tar xzf *.tar.gz
  cd ..
fi

PREFIX=inference-vis-demos%2F
URL_ROOT=http://paddlemodels.bj.bcebos.com/${PREFIX}

# download vis_demo data
function download() {
  dir_name=$1
  mkdir -p $dir_name
  cd $dir_name
  if [[ -e "${PREFIX}${dir_name}.tar.gz" ]]; then
    echo "${PREFIX}${dir_name}.tar.gz has been downloaded."
  else
    if [ "$WIN_DETECT" != "" ]; then
      wget -q -Y off ${URL_ROOT}$dir_name.tar.gz
    else
      wget -q --no-proxy ${URL_ROOT}$dir_name.tar.gz
    fi
    tar xzf *.tar.gz
  fi
  cd ..
}

vis_demo_list='se_resnext50 ocr mobilenet'
for vis_demo_name in $vis_demo_list; do
  download $vis_demo_name
done

# download word2vec data
mkdir -p word2vec
cd word2vec
if [[ -e "word2vec.inference.model.tar.gz" ]]; then
  echo "word2vec.inference.model.tar.gz has been downloaded."
else
  wget -q http://paddle-inference-dist.bj.bcebos.com/word2vec.inference.model.tar.gz
  tar xzf *.tar.gz
fi

# compile and run the demos
cd $current_dir
mkdir -p build
cd build
rm -rf *

# Run every demo before exiting; failures are recorded in test_summary.txt.
EXIT_CODE=0

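# Build and test each demo against both the static (ON) and shared (OFF) inference library.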
for WITH_STATIC_LIB in ON OFF; do
  if [ "$WIN_DETECT" != "" ]; then
    # TODO(wilber, T8T9): Do we still need to support the Windows GPU static library?
    if [ "$TEST_GPU_CPU" == "ON" ] && [ "$WITH_STATIC_LIB" == "ON" ]; then
      continue
    fi
    # -----simple_on_word2vec on windows-----
    cmake .. -GNinja -DPADDLE_LIB=${inference_install_dir} \
      -DWITH_MKL=$TURN_ON_MKL \
      -DDEMO_NAME=simple_on_word2vec \
      -DWITH_GPU=$TEST_GPU_CPU \
      -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
      -DMSVC_STATIC_CRT=$MSVC_STATIC_CRT \
      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME \
      -DCMAKE_BUILD_TYPE=Release \
      -DCUDA_LIB="$CUDA_LIB"
    ninja
    for use_gpu in $use_gpu_list; do
      ./simple_on_word2vec.exe \
        --dirname=$DATA_DIR/word2vec/word2vec.inference.model \
        --use_gpu=$use_gpu
      if [ $? -ne 0 ]; then
        echo "simple_on_word2vec use_gpu:${use_gpu} runs failed " >> ${current_dir}/test_summary.txt
        EXIT_CODE=1
      fi
    done

    # -----vis_demo on windows-----
    rm -rf *
    cmake .. -GNinja -DPADDLE_LIB=${inference_install_dir} \
      -DWITH_MKL=$TURN_ON_MKL \
      -DDEMO_NAME=vis_demo \
      -DWITH_GPU=$TEST_GPU_CPU \
      -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
      -DMSVC_STATIC_CRT=$MSVC_STATIC_CRT \
      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME \
      -DCMAKE_BUILD_TYPE=Release \
      -DCUDA_LIB="$CUDA_LIB"
    ninja
    for use_gpu in $use_gpu_list; do
      for vis_demo_name in $vis_demo_list; do
        ./vis_demo.exe \
          --modeldir=$DATA_DIR/$vis_demo_name/model \
          --data=$DATA_DIR/$vis_demo_name/data.txt \
          --refer=$DATA_DIR/$vis_demo_name/result.txt \
          --use_gpu=$use_gpu
        if [ $? -ne 0 ]; then
          echo "vis demo $vis_demo_name use_gpu:${use_gpu} runs failed " >> ${current_dir}/test_summary.txt
          EXIT_CODE=1
        fi
      done
    done
    # --------tensorrt mobilenet on windows------
    if [ "$USE_TENSORRT" == "ON" ] && [ "$TEST_GPU_CPU" == "ON" ]; then
      rm -rf *
      cmake .. -GNinja -DPADDLE_LIB=${inference_install_dir} \
        -DWITH_MKL=$TURN_ON_MKL \
        -DDEMO_NAME=trt_mobilenet_demo \
        -DWITH_GPU=$TEST_GPU_CPU \
        -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
        -DMSVC_STATIC_CRT=$MSVC_STATIC_CRT \
        -DUSE_TENSORRT=$USE_TENSORRT \
        -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
        -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME \
        -DCMAKE_BUILD_TYPE=Release \
        -DCUDA_LIB="$CUDA_LIB"
      ninja
      ./trt_mobilenet_demo.exe \
        --modeldir=$DATA_DIR/mobilenet/model \
        --data=$DATA_DIR/mobilenet/data.txt \
        --refer=$DATA_DIR/mobilenet/result.txt
      if [ $? -ne 0 ]; then
        echo "trt_mobilenet_demo runs failed." >> ${current_dir}/test_summary.txt
        EXIT_CODE=1
      fi
    fi
  else
    # -----simple_on_word2vec on linux/mac-----
    rm -rf *
    cmake .. -DPADDLE_LIB=${inference_install_dir} \
      -DWITH_MKL=$TURN_ON_MKL \
      -DDEMO_NAME=simple_on_word2vec \
      -DWITH_GPU=$TEST_GPU_CPU \
      -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
    make -j$(nproc)
    word2vec_model=$DATA_DIR'/word2vec/word2vec.inference.model'
    if [ -d $word2vec_model ]; then
      for use_gpu in $use_gpu_list; do
        ./simple_on_word2vec \
          --dirname=$DATA_DIR/word2vec/word2vec.inference.model \
          --use_gpu=$use_gpu
        if [ $? -ne 0 ]; then
          echo "simple_on_word2vec use_gpu:${use_gpu} runs failed " >> ${current_dir}/test_summary.txt
          EXIT_CODE=1
        fi
      done
    fi
    # ---------vis_demo on linux/mac---------
    rm -rf *
    cmake .. -DPADDLE_LIB=${inference_install_dir} \
      -DWITH_MKL=$TURN_ON_MKL \
      -DDEMO_NAME=vis_demo \
      -DWITH_GPU=$TEST_GPU_CPU \
      -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
      -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
    make -j$(nproc)
    for use_gpu in $use_gpu_list; do
      for vis_demo_name in $vis_demo_list; do
        ./vis_demo \
          --modeldir=$DATA_DIR/$vis_demo_name/model \
          --data=$DATA_DIR/$vis_demo_name/data.txt \
          --refer=$DATA_DIR/$vis_demo_name/result.txt \
          --use_gpu=$use_gpu
        if [ $? -ne 0 ]; then
          echo "vis demo $vis_demo_name use_gpu:${use_gpu} runs failed " >> ${current_dir}/test_summary.txt
          EXIT_CODE=1
        fi
      done
    done
    # --------tensorrt mobilenet on linux/mac------
    if [ "$USE_TENSORRT" == "ON" ] && [ "$TEST_GPU_CPU" == "ON" ]; then
      rm -rf *
      cmake .. -DPADDLE_LIB=${inference_install_dir} \
        -DWITH_MKL=$TURN_ON_MKL \
        -DDEMO_NAME=trt_mobilenet_demo \
        -DWITH_GPU=$TEST_GPU_CPU \
        -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
        -DUSE_TENSORRT=$USE_TENSORRT \
        -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
        -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
      make -j$(nproc)
      ./trt_mobilenet_demo \
        --modeldir=$DATA_DIR/mobilenet/model \
        --data=$DATA_DIR/mobilenet/data.txt \
        --refer=$DATA_DIR/mobilenet/result.txt
      if [ $? -ne 0 ]; then
        echo "trt_mobilenet_demo runs failed " >> ${current_dir}/test_summary.txt
        EXIT_CODE=1
      fi
    fi

    # --------onnxruntime mobilenetv2 on linux/mac------
    if [ "$WITH_ONNXRUNTIME" == "ON" ]; then
      rm -rf *
      cmake .. -DPADDLE_LIB=${inference_install_dir} \
        -DWITH_MKL=$TURN_ON_MKL \
        -DDEMO_NAME=onnxruntime_mobilenet_demo \
        -DWITH_GPU=$TEST_GPU_CPU \
        -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
        -DUSE_TENSORRT=$USE_TENSORRT \
        -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
        -DWITH_ONNXRUNTIME=$WITH_ONNXRUNTIME
      make -j$(nproc)
      ./onnxruntime_mobilenet_demo \
        --modeldir=$DATA_DIR/MobileNetV2/MobileNetV2 \
        --data=$DATA_DIR/MobileNetV2/MobileNetV2/data.txt
      if [ $? -ne 0 ]; then
        echo "onnxruntime_mobilenet_demo runs failed " >> ${current_dir}/test_summary.txt
        EXIT_CODE=1
      fi
    fi
  fi
done

set +x

# Report any failed demos recorded during the run.
if [[ -f ${current_dir}/test_summary.txt ]]; then
  echo " "
  echo "Summary of failed demo_ci tests"
  echo "=====================test summary======================"
  echo "The following tests failed:"
  cat ${current_dir}/test_summary.txt
  echo "========================================================"
  echo " "
fi

set -x

exit ${EXIT_CODE}