diff --git a/deploy/cpp/CMakeLists.txt b/deploy/cpp/CMakeLists.txt index 4f148869f84e42fbc6bdb29ba42f5a9b274b2397..4b11eec62c91c9d2bd8265a71cc9219ab60d1db6 100755 --- a/deploy/cpp/CMakeLists.txt +++ b/deploy/cpp/CMakeLists.txt @@ -1,4 +1,5 @@ +cmake_minimum_required(VERSION 3.14) project(clas_system CXX C) option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON) option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF) @@ -13,7 +14,6 @@ SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT") set(DEMO_NAME "clas_system") - macro(safe_set_static_flag) foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE @@ -198,6 +198,10 @@ endif() set(DEPS ${DEPS} ${OpenCV_LIBS}) +include(FetchContent) +include(external-cmake/auto-log.cmake) +include_directories(${FETCHCONTENT_BASE_DIR}/extern_autolog-src) + AUX_SOURCE_DIRECTORY(./src SRCS) add_executable(${DEMO_NAME} ${SRCS}) diff --git a/deploy/cpp/external-cmake/auto-log.cmake b/deploy/cpp/external-cmake/auto-log.cmake new file mode 100644 index 0000000000000000000000000000000000000000..9be9c2fb3b6d61207fad0d9bf216f4b9ad715fe3 --- /dev/null +++ b/deploy/cpp/external-cmake/auto-log.cmake @@ -0,0 +1,12 @@ +find_package(Git REQUIRED) +include(FetchContent) + +set(FETCHCONTENT_BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/third-party") + +FetchContent_Declare( + extern_Autolog + PREFIX autolog + GIT_REPOSITORY https://github.com/LDOUBLEV/AutoLog.git + GIT_TAG main +) +FetchContent_MakeAvailable(extern_Autolog) diff --git a/deploy/cpp/include/cls.h b/deploy/cpp/include/cls.h index 3cc5dcc169f6849152a184860042b874b29b4700..600cffbf1838142eb30365bb9016f9ab3a7663e7 100644 --- a/deploy/cpp/include/cls.h +++ b/deploy/cpp/include/cls.h @@ -61,7 +61,7 @@ public: void LoadModel(const std::string &model_path, const std::string &params_path); // Run predictor - double Run(cv::Mat &img); + double Run(cv::Mat &img, std::vector *times); private: std::shared_ptr predictor_; diff --git
a/deploy/cpp/include/cls_config.h b/deploy/cpp/include/cls_config.h index 7377573c332a77bdee237181ecab342414e07bf9..d74bb7b4d360fc969fd468680d03f3091f9e80d1 100644 --- a/deploy/cpp/include/cls_config.h +++ b/deploy/cpp/include/cls_config.h @@ -1,4 +1,4 @@ -// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -36,8 +36,7 @@ public: this->gpu_mem = stoi(config_map_["gpu_mem"]); - this->cpu_math_library_num_threads = - stoi(config_map_["cpu_math_library_num_threads"]); + this->cpu_threads = stoi(config_map_["cpu_threads"]); this->use_mkldnn = bool(stoi(config_map_["use_mkldnn"])); @@ -51,6 +50,8 @@ public: this->resize_short_size = stoi(config_map_["resize_short_size"]); this->crop_size = stoi(config_map_["crop_size"]); + + this->benchmark = bool(stoi(config_map_["benchmark"])); } bool use_gpu = false; @@ -59,12 +60,13 @@ public: int gpu_mem = 4000; - int cpu_math_library_num_threads = 1; + int cpu_threads = 1; bool use_mkldnn = false; bool use_tensorrt = false; bool use_fp16 = false; + bool benchmark = false; std::string cls_model_path; diff --git a/deploy/cpp/src/cls.cpp b/deploy/cpp/src/cls.cpp index d0fa21f7db0763308bbb3175ce3106757f90765c..6ce09e762856c04fba687d6c91023794731b0884 100644 --- a/deploy/cpp/src/cls.cpp +++ b/deploy/cpp/src/cls.cpp @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include #include namespace PaddleClas { @@ -53,11 +52,12 @@ void Classifier::LoadModel(const std::string &model_path, this->predictor_ = CreatePredictor(config); } -double Classifier::Run(cv::Mat &img) { +double Classifier::Run(cv::Mat &img, std::vector *times) { cv::Mat srcimg; cv::Mat resize_img; img.copyTo(srcimg); + auto preprocess_start = std::chrono::system_clock::now(); this->resize_op_.Run(img, resize_img, this->resize_short_size_); this->crop_op_.Run(resize_img, this->crop_size_); @@ -70,7 +70,9 @@ double Classifier::Run(cv::Mat &img) { auto input_names = this->predictor_->GetInputNames(); auto input_t = this->predictor_->GetInputHandle(input_names[0]); input_t->Reshape({1, 3, resize_img.rows, resize_img.cols}); - auto start = std::chrono::system_clock::now(); + auto preprocess_end = std::chrono::system_clock::now(); + + auto infer_start = std::chrono::system_clock::now(); input_t->CopyFromCpu(input.data()); this->predictor_->Run(); @@ -83,21 +85,29 @@ double Classifier::Run(cv::Mat &img) { out_data.resize(out_num); output_t->CopyToCpu(out_data.data()); - auto end = std::chrono::system_clock::now(); - auto duration = - std::chrono::duration_cast(end - start); - double cost_time = double(duration.count()) * - std::chrono::microseconds::period::num / - std::chrono::microseconds::period::den; + auto infer_end = std::chrono::system_clock::now(); + auto postprocess_start = std::chrono::system_clock::now(); int maxPosition = max_element(out_data.begin(), out_data.end()) - out_data.begin(); + auto postprocess_end = std::chrono::system_clock::now(); + + std::chrono::duration preprocess_diff = + preprocess_end - preprocess_start; + times->push_back(double(preprocess_diff.count() * 1000)); + std::chrono::duration inference_diff = infer_end - infer_start; + double inference_cost_time = double(inference_diff.count() * 1000); + times->push_back(inference_cost_time); + std::chrono::duration postprocess_diff = + postprocess_end - postprocess_start; + 
times->push_back(double(postprocess_diff.count() * 1000)); + std::cout << "result: " << std::endl; std::cout << "\tclass id: " << maxPosition << std::endl; std::cout << std::fixed << std::setprecision(10) << "\tscore: " << double(out_data[maxPosition]) << std::endl; - return cost_time; + return inference_cost_time; } } // namespace PaddleClas diff --git a/deploy/cpp/src/main.cpp b/deploy/cpp/src/main.cpp index 0dda50819b711300d44ce4b7f3996ac0788d6d2b..4fc191b3cdab748cbcc54f3f5136a36ef7bcca2b 100644 --- a/deploy/cpp/src/main.cpp +++ b/deploy/cpp/src/main.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ #include #include +#include #include #include @@ -61,11 +62,12 @@ int main(int argc, char **argv) { Classifier classifier(config.cls_model_path, config.cls_params_path, config.use_gpu, config.gpu_id, config.gpu_mem, - config.cpu_math_library_num_threads, config.use_mkldnn, + config.cpu_threads, config.use_mkldnn, config.use_tensorrt, config.use_fp16, config.resize_short_size, config.crop_size); double elapsed_time = 0.0; + std::vector cls_times; int warmup_iter = img_files_list.size() > 5 ? 
5 : 0; for (int idx = 0; idx < img_files_list.size(); ++idx) { std::string img_path = img_files_list[idx]; @@ -78,7 +80,7 @@ cv::cvtColor(srcimg, srcimg, cv::COLOR_BGR2RGB); - double run_time = classifier.Run(srcimg); + double run_time = classifier.Run(srcimg, &cls_times); if (idx >= warmup_iter) { elapsed_time += run_time; std::cout << "Current image path: " << img_path << std::endl; @@ -90,5 +92,16 @@ } } + std::string precision = "fp32"; + + if (config.use_fp16) + precision = "fp16"; + if (config.benchmark) { + AutoLogger autolog("Classification", config.use_gpu, config.use_tensorrt, + config.use_mkldnn, config.cpu_threads, 1, + "1, 3, 224, 224", precision, cls_times, + img_files_list.size()); + autolog.report(); + } return 0; } diff --git a/deploy/cpp/tools/build.sh b/deploy/cpp/tools/build.sh index ad6a727f0ce05ba21f536b6dbaa171ade8b5335b..0a3aa044c5a822b5ab4e082ec699ceaf0de9a560 100755 --- a/deploy/cpp/tools/build.sh +++ b/deploy/cpp/tools/build.sh @@ -1,5 +1,5 @@ -OPENCV_DIR=/PaddleClas/opencv-3.4.7/opencv3/ -LIB_DIR=/PaddleClas/fluid_inference/ +OPENCV_DIR=/work/project/project/cpp_infer/opencv-3.4.7/opencv3 +LIB_DIR=/work/project/project/cpp_infer/paddle_inference/ CUDA_LIB_DIR=/usr/local/cuda/lib64 CUDNN_LIB_DIR=/usr/lib/x86_64-linux-gnu/ diff --git a/deploy/cpp/tools/config.txt b/deploy/cpp/tools/config.txt index b0becad61c415bb63958eb20f0f96fe4edb81da5..0d915a91aa2955ea943b33aa3dafbdfe3dd49946 100755 --- a/deploy/cpp/tools/config.txt +++ b/deploy/cpp/tools/config.txt @@ -2,7 +2,7 @@ use_gpu 0 gpu_id 0 gpu_mem 4000 -cpu_math_library_num_threads 10 +cpu_threads 10 use_mkldnn 1 use_tensorrt 0 use_fp16 0 @@ -12,3 +12,6 @@ cls_model_path /PaddleClas/inference/cls_infer.pdmodel cls_params_path /PaddleClas/inference/cls_infer.pdiparams resize_short_size 256 crop_size 224 + +# for log env info +benchmark 0 diff --git a/deploy/hubserving/clas/module.py b/deploy/hubserving/clas/module.py index
dd8bf5df66674e5bcc387b1166b32eb72df1fb15..98ec1d9012b0205fc18fffeaa513c6e312c19c6b 100644 --- a/deploy/hubserving/clas/module.py +++ b/deploy/hubserving/clas/module.py @@ -76,8 +76,7 @@ class ClasSystem(nn.Layer): starttime = time.time() outputs = self.cls_predictor.predict(inputs) elapse = time.time() - starttime - preds = self.cls_predictor.postprocess(outputs) - return {"prediction": preds, "elapse": elapse} + return {"prediction": outputs, "elapse": elapse} @serving def serving_method(self, images, revert_params): diff --git a/deploy/paddleserving/README.md b/deploy/paddleserving/README.md new file mode 100644 index 0000000000000000000000000000000000000000..75eb3e35b8ffa03bc6ae69db42fffb33bdccaf14 --- /dev/null +++ b/deploy/paddleserving/README.md @@ -0,0 +1,172 @@ +# PaddleClas Pipeline WebService + +(English|[简体中文](./README_CN.md)) + +PaddleClas provides two service deployment methods: +- Based on **PaddleHub Serving**: Code path is "`./deploy/hubserving`". Please refer to the [tutorial](../../deploy/hubserving/readme_en.md) +- Based on **PaddleServing**: Code path is "`./deploy/paddleserving`". Please follow this tutorial. + +# Service deployment based on PaddleServing + +This document will introduce how to use the [PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README.md) to deploy the ResNet50_vd model as a pipeline online service. + +Some Key Features of Paddle Serving: +- Integrate with Paddle training pipeline seamlessly, most paddle models can be deployed with one line command. +- Industrial serving features supported, such as models management, online loading, online A/B testing etc. +- Highly concurrent and efficient communication between clients and servers supported. + +The introduction and tutorial of Paddle Serving service deployment framework reference [document](https://github.com/PaddlePaddle/Serving/blob/develop/README.md). 
+ + +## Contents +- [Environmental preparation](#environmental-preparation) +- [Model conversion](#model-conversion) +- [Paddle Serving pipeline deployment](#paddle-serving-pipeline-deployment) +- [FAQ](#faq) + + +## Environmental preparation + +PaddleClas operating environment and PaddleServing operating environment are needed. + +1. Please prepare PaddleClas operating environment reference [link](../../docs/zh_CN/tutorials/install.md). + Download the corresponding paddle whl package according to the environment, it is recommended to install version 2.1.0. + +2. The steps of PaddleServing operating environment prepare are as follows: + + Install serving which used to start the service + ``` + pip3 install paddle-serving-server==0.6.1 # for CPU + pip3 install paddle-serving-server-gpu==0.6.1 # for GPU + # Other GPU environments need to confirm the environment and then choose to execute the following commands + pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6 + pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7 + ``` + +3. Install the client to send requests to the service + In [download link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md) find the client installation package corresponding to the python version. + The python3.7 version is recommended here: + + ``` + wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl + pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl + ``` + +4. Install serving-app + ``` + pip3 install paddle-serving-app==0.6.1 + ``` + + **note:** If you want to install the latest version of PaddleServing, refer to [link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md). + + + +## Model conversion +When using PaddleServing for service deployment, you need to convert the saved inference model into a serving model that is easy to deploy. 
+ +Firstly, download the inference model of ResNet50_vd +``` +# Download and unzip the ResNet50_vd model +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar +``` + +Then, you can use installed paddle_serving_client tool to convert inference model to mobile model. +``` +# ResNet50_vd model conversion +python3 -m paddle_serving_client.convert --dirname ./ResNet50_vd_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./ResNet50_vd_serving/ \ + --serving_client ./ResNet50_vd_client/ +``` + +After the ResNet50_vd inference model is converted, there will be additional folders of `ResNet50_vd_serving` and `ResNet50_vd_client` in the current folder, with the following format: +``` +|- ResNet50_vd_serving/ + |- __model__ + |- __params__ + |- serving_server_conf.prototxt + |- serving_server_conf.stream.prototxt + +|- ResNet50_vd_client + |- serving_client_conf.prototxt + |- serving_client_conf.stream.prototxt +``` + +Once you have the model file for deployment, you need to change the alias name in `serving_server_conf.prototxt`: Change `alias_name` in `feed_var` to `image`, change `alias_name` in `fetch_var` to `prediction`, +The modified serving_server_conf.prototxt file is as follows: +``` +feed_var { + name: "inputs" + alias_name: "image" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "prediction" + is_lod_tensor: true + fetch_type: 1 + shape: -1 +} +``` + + +## Paddle Serving pipeline deployment + +1. Download the PaddleClas code, if you have already downloaded it, you can skip this step.
+ ``` + git clone https://github.com/PaddlePaddle/PaddleClas + + # Enter the working directory + cd PaddleClas/deploy/paddleserving/ + ``` + + The paddleserving directory contains the code to start the pipeline service and send prediction requests, including: + ``` + __init__.py + config.yml # configuration file of starting the service + pipeline_http_client.py # script to send pipeline prediction request by http + pipeline_rpc_client.py # script to send pipeline prediction request by rpc + classification_web_service.py # start the script of the pipeline server + ``` + +2. Run the following command to start the service. + ``` + # Start the service and save the running log in log.txt + python3 classification_web_service.py &>log.txt & + ``` + After the service is successfully started, a log similar to the following will be printed in log.txt + ![](./imgs/start_server.png) + +3. Send service request + ``` + python3 pipeline_http_client.py + ``` + After successfully running, the predicted result of the model will be printed in the cmd window. An example of the result is: + ![](./imgs/results.png) + + Adjust the number of concurrency in config.yml to get the largest QPS. + + ``` + op: + concurrency: 8 + ... + ``` + + Multiple service requests can be sent at the same time if necessary. + + The predicted performance data will be automatically written into the `PipelineServingLogs/pipeline.tracer` file. + + +## FAQ +**Q1**: No result return after sending the request. + +**A1**: Do not set the proxy when starting the service and sending the request. You can close the proxy before starting the service and before sending the request.
The command to close the proxy is: +``` +unset https_proxy +unset http_proxy +``` diff --git a/deploy/paddleserving/README_CN.md b/deploy/paddleserving/README_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..3394ae5b5a75c774858fb50e429d083f8a19fc07 --- /dev/null +++ b/deploy/paddleserving/README_CN.md @@ -0,0 +1,167 @@ +# PaddleClas 服务化部署 + +([English](./README.md)|简体中文) + +PaddleClas提供2种服务部署方式: +- 基于PaddleHub Serving的部署:代码路径为"`./deploy/hubserving`",使用方法参考[文档](../../deploy/hubserving/readme.md); +- 基于PaddleServing的部署:代码路径为"`./deploy/paddleserving`",按照本教程使用。 + +# 基于PaddleServing的服务部署 + +本文档以经典的ResNet50_vd模型为例,介绍如何使用[PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README_CN.md)工具部署PaddleClas +动态图模型的pipeline在线服务。 + +相比较于hubserving部署,PaddleServing具备以下优点: +- 支持客户端和服务端之间高并发和高效通信 +- 支持 工业级的服务能力 例如模型管理,在线加载,在线A/B测试等 +- 支持 多种编程语言 开发客户端,例如C++, Python和Java + +更多有关PaddleServing服务化部署框架介绍和使用教程参考[文档](https://github.com/PaddlePaddle/Serving/blob/develop/README_CN.md)。 + +## 目录 +- [环境准备](#环境准备) +- [模型转换](#模型转换) +- [Paddle Serving pipeline部署](#部署) +- [FAQ](#FAQ) + + +## 环境准备 + +需要准备PaddleClas的运行环境和PaddleServing的运行环境。 + +- 准备PaddleClas的[运行环境](../../docs/zh_CN/tutorials/install.md), 根据环境下载对应的paddle whl包,推荐安装2.1.0版本 + +- 准备PaddleServing的运行环境,步骤如下 + +1. 安装serving,用于启动服务 + ``` + pip3 install paddle-serving-server==0.6.1 # for CPU + pip3 install paddle-serving-server-gpu==0.6.1 # for GPU + # 其他GPU环境需要确认环境再选择执行如下命令 + pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6 + pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7 + ``` + +2. 安装client,用于向服务发送请求 + 在[下载链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)中找到对应python版本的client安装包,这里推荐python3.7版本: + + ``` + wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl + pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl + ``` + +3. 
安装serving-app + ``` + pip3 install paddle-serving-app==0.6.1 + ``` + **Note:** 如果要安装最新版本的PaddleServing参考[链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)。 + + +## 模型转换 + +使用PaddleServing做服务化部署时,需要将保存的inference模型转换为serving易于部署的模型。 + +首先,下载ResNet50_vd的inference模型 +``` +# 下载并解压ResNet50_vd模型 +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar +``` + +接下来,用安装的paddle_serving_client把下载的inference模型转换成易于server部署的模型格式。 + +``` +# 转换ResNet50_vd模型 +python3 -m paddle_serving_client.convert --dirname ./ResNet50_vd_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./ResNet50_vd_serving/ \ + --serving_client ./ResNet50_vd_client/ +``` +ResNet50_vd推理模型转换完成后,会在当前文件夹多出`ResNet50_vd_serving` 和`ResNet50_vd_client`的文件夹,具备如下格式: +``` +|- ResNet50_vd_serving/ + |- __model__ + |- __params__ + |- serving_server_conf.prototxt + |- serving_server_conf.stream.prototxt + +|- ResNet50_vd_client + |- serving_client_conf.prototxt + |- serving_client_conf.stream.prototxt + +``` +得到模型文件之后,需要修改serving_server_conf.prototxt中的alias名字: 将`feed_var`中的`alias_name`改为`image`, 将`fetch_var`中的`alias_name`改为`prediction`, +修改后的serving_server_conf.prototxt内容如下: +``` +feed_var { + name: "inputs" + alias_name: "image" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "prediction" + is_lod_tensor: true + fetch_type: 1 + shape: -1 +} +``` + + +## Paddle Serving pipeline部署 + +1. 下载PaddleClas代码,若已下载可跳过此步骤 + ``` + git clone https://github.com/PaddlePaddle/PaddleClas + + # 进入到工作目录 + cd PaddleClas/deploy/paddleserving/ + ``` + paddleserving目录包含启动pipeline服务和发送预测请求的代码,包括: + ``` + __init__.py + config.yml # 启动服务的配置文件 + pipeline_http_client.py # http方式发送pipeline预测请求的脚本 + pipeline_rpc_client.py # rpc方式发送pipeline预测请求的脚本 + classification_web_service.py # 启动pipeline服务端的脚本 + ``` + +2.
启动服务可运行如下命令: + ``` + # 启动服务,运行日志保存在log.txt + python3 classification_web_service.py &>log.txt & + ``` + 成功启动服务后,log.txt中会打印类似如下日志 + ![](./imgs/start_server.png) + +3. 发送服务请求: + ``` + python3 pipeline_http_client.py + ``` + 成功运行后,模型预测的结果会打印在cmd窗口中,结果示例为: + ![](./imgs/results.png) + + 调整 config.yml 中的并发个数可以获得最大的QPS + ``` + op: + #并发数,is_thread_op=True时,为线程并发;否则为进程并发 + concurrency: 8 + ... + ``` + 有需要的话可以同时发送多个服务请求 + + 预测性能数据会被自动写入 `PipelineServingLogs/pipeline.tracer` 文件中。 + + +## FAQ +**Q1**: 发送请求后没有结果返回或者提示输出解码报错 + +**A1**: 启动服务和发送请求时不要设置代理,可以在启动服务前和发送请求前关闭代理,关闭代理的命令是: +``` +unset https_proxy +unset http_proxy +``` diff --git a/deploy/paddleserving/__init__.py b/deploy/paddleserving/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/deploy/paddleserving/classification_web_service.py b/deploy/paddleserving/classification_web_service.py new file mode 100644 index 0000000000000000000000000000000000000000..6c353eb102fd8c2914c71e6a3c946431823b8c32 --- /dev/null +++ b/deploy/paddleserving/classification_web_service.py @@ -0,0 +1,73 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +from paddle_serving_app.reader import Sequential, URL2Image, Resize, CenterCrop, RGB2BGR, Transpose, Div, Normalize, Base64ToImage +try: + from paddle_serving_server_gpu.web_service import WebService, Op +except ImportError: + from paddle_serving_server.web_service import WebService, Op +import logging +import numpy as np +import base64, cv2 + +class ImagenetOp(Op): + def init_op(self): + self.seq = Sequential([ + Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)), + Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], + True) + ]) + self.label_dict = {} + label_idx = 0 + with open("imagenet.label") as fin: + for line in fin: + self.label_dict[label_idx] = line.strip() + label_idx += 1 + + def preprocess(self, input_dicts, data_id, log_id): + (_, input_dict), = input_dicts.items() + batch_size = len(input_dict.keys()) + imgs = [] + for key in input_dict.keys(): + data = base64.b64decode(input_dict[key].encode('utf8')) + data = np.fromstring(data, np.uint8) + im = cv2.imdecode(data, cv2.IMREAD_COLOR) + img = self.seq(im) + imgs.append(img[np.newaxis, :].copy()) + input_imgs = np.concatenate(imgs, axis=0) + return {"image": input_imgs}, False, None, "" + + def postprocess(self, input_dicts, fetch_dict, log_id): + score_list = fetch_dict["prediction"] + result = {"label": [], "prob": []} + for score in score_list: + score = score.tolist() + max_score = max(score) + result["label"].append(self.label_dict[score.index(max_score)] + .strip().replace(",", "")) + result["prob"].append(max_score) + result["label"] = str(result["label"]) + result["prob"] = str(result["prob"]) + return result, None, "" + + +class ImageService(WebService): + def get_pipeline_response(self, read_op): + image_op = ImagenetOp(name="imagenet", input_ops=[read_op]) + return image_op + + +uci_service = ImageService(name="imagenet") +uci_service.prepare_pipeline_config("config.yml") +uci_service.run_service() diff --git a/deploy/paddleserving/config.yml 
b/deploy/paddleserving/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..d9f464dd093d5a3d0ac34a61f4af17e3792fcd86 --- /dev/null +++ b/deploy/paddleserving/config.yml @@ -0,0 +1,33 @@ +#worker_num, 最大并发数。当build_dag_each_worker=True时, 框架会创建worker_num个进程,每个进程内构建grpcSever和DAG +##当build_dag_each_worker=False时,框架会设置主线程grpc线程池的max_workers=worker_num +worker_num: 1 + +#http端口, rpc_port和http_port不允许同时为空。当rpc_port可用且http_port为空时,不自动生成http_port +http_port: 18080 +rpc_port: 9993 + +dag: + #op资源类型, True, 为线程模型;False,为进程模型 + is_thread_op: False +op: + imagenet: + #并发数,is_thread_op=True时,为线程并发;否则为进程并发 + concurrency: 1 + + #当op配置没有server_endpoints时,从local_service_conf读取本地服务配置 + local_service_conf: + + #uci模型路径 + model_config: ResNet50_vd_serving + + #计算硬件类型: 空缺时由devices决定(CPU/GPU),0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu + device_type: 1 + + #计算硬件ID,当devices为""或不写时为CPU预测;当devices为"0", "0,1,2"时为GPU预测,表示使用的GPU卡 + devices: "0" # "0,1" + + #client类型,包括brpc, grpc和local_predictor.local_predictor不启动Serving服务,进程内预测 + client_type: local_predictor + + #Fetch结果列表,以client_config中fetch_var的alias_name为准 + fetch_list: ["prediction"] diff --git a/deploy/paddleserving/cpu_utilization.py b/deploy/paddleserving/cpu_utilization.py new file mode 100644 index 0000000000000000000000000000000000000000..984c72370a3551722b21b8e06d418efd832192ef --- /dev/null +++ b/deploy/paddleserving/cpu_utilization.py @@ -0,0 +1,4 @@ +import psutil +cpu_utilization=psutil.cpu_percent(1,False) +print('CPU_UTILIZATION:', cpu_utilization) + diff --git a/deploy/paddleserving/daisy.jpg b/deploy/paddleserving/daisy.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7edeca63e5f32e68550ef720d81f59df58a8eabc Binary files /dev/null and b/deploy/paddleserving/daisy.jpg differ diff --git a/deploy/paddleserving/image_http_client.py b/deploy/paddleserving/image_http_client.py deleted file mode 100644 index 
4e33c4a7e4bb60b2937b9a0073cb16c74fe4d911..0000000000000000000000000000000000000000 --- a/deploy/paddleserving/image_http_client.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import requests -import base64 -import json -import sys -import numpy as np - -py_version = sys.version_info[0] - - -def predict(image_path, server): - - with open(image_path, "rb") as f: - image = base64.b64encode(f.read()).decode("utf-8") - req = json.dumps({"feed": [{"image": image}], "fetch": ["prediction"]}) - r = requests.post( - server, data=req, headers={"Content-Type": "application/json"}) - try: - pred = r.json()["result"]["prediction"][0] - cls_id = np.argmax(pred) - score = pred[cls_id] - pred = {"cls_id": cls_id, "score": score} - return pred - except ValueError: - print(r.text) - return r - - -if __name__ == "__main__": - server = "http://127.0.0.1:{}/image/prediction".format(sys.argv[1]) - image_file = sys.argv[2] - res = predict(image_file, server) - print("res:", res) diff --git a/deploy/paddleserving/image_service_cpu.py b/deploy/paddleserving/image_service_cpu.py deleted file mode 100644 index 92f67d3220670ffa880ff663ed887984325a0723..0000000000000000000000000000000000000000 --- a/deploy/paddleserving/image_service_cpu.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -import base64 -from paddle_serving_server.web_service import WebService -import utils - - -class ImageService(WebService): - def __init__(self, name): - super(ImageService, self).__init__(name=name) - self.operators = self.create_operators() - - def create_operators(self): - size = 224 - img_mean = [0.485, 0.456, 0.406] - img_std = [0.229, 0.224, 0.225] - img_scale = 1.0 / 255.0 - decode_op = utils.DecodeImage() - resize_op = utils.ResizeImage(resize_short=256) - crop_op = utils.CropImage(size=(size, size)) - normalize_op = utils.NormalizeImage( - scale=img_scale, mean=img_mean, std=img_std) - totensor_op = utils.ToTensor() - return [decode_op, resize_op, crop_op, normalize_op, totensor_op] - - def _process_image(self, data, ops): - for op in ops: - data = op(data) - return data - - def preprocess(self, feed={}, fetch=[]): - feed_batch = [] - for ins in feed: - if "image" not in ins: - raise ("feed data error!") - sample = base64.b64decode(ins["image"]) - img = self._process_image(sample, self.operators) - feed_batch.append({"image": img}) - return feed_batch, fetch - - -image_service = ImageService(name="image") -image_service.load_model_config(sys.argv[1]) -image_service.prepare_server( - workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu") -image_service.run_server() -image_service.run_flask() diff --git a/deploy/paddleserving/image_service_gpu.py b/deploy/paddleserving/image_service_gpu.py deleted 
file mode 100644 index df61cdd60659713ac77176beb8c1ecfad1c8efd8..0000000000000000000000000000000000000000 --- a/deploy/paddleserving/image_service_gpu.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -import base64 -from paddle_serving_server_gpu.web_service import WebService - -import utils - - -class ImageService(WebService): - def __init__(self, name): - super(ImageService, self).__init__(name=name) - self.operators = self.create_operators() - - def create_operators(self): - size = 224 - img_mean = [0.485, 0.456, 0.406] - img_std = [0.229, 0.224, 0.225] - img_scale = 1.0 / 255.0 - decode_op = utils.DecodeImage() - resize_op = utils.ResizeImage(resize_short=256) - crop_op = utils.CropImage(size=(size, size)) - normalize_op = utils.NormalizeImage( - scale=img_scale, mean=img_mean, std=img_std) - totensor_op = utils.ToTensor() - return [decode_op, resize_op, crop_op, normalize_op, totensor_op] - - def _process_image(self, data, ops): - for op in ops: - data = op(data) - return data - - def preprocess(self, feed={}, fetch=[]): - feed_batch = [] - for ins in feed: - if "image" not in ins: - raise ("feed data error!") - sample = base64.b64decode(ins["image"]) - img = self._process_image(sample, self.operators) - feed_batch.append({"image": img}) - return feed_batch, fetch - - -image_service = ImageService(name="image") 
-image_service.load_model_config(sys.argv[1]) -image_service.set_gpus("0") -image_service.prepare_server( - workdir=sys.argv[2], port=int(sys.argv[3]), device="gpu") -image_service.run_server() -image_service.run_flask() diff --git a/deploy/paddleserving/imagenet.label b/deploy/paddleserving/imagenet.label new file mode 100644 index 0000000000000000000000000000000000000000..d7146735146ea1894173d6d0e20fb90af36be849 --- /dev/null +++ b/deploy/paddleserving/imagenet.label @@ -0,0 +1,1000 @@ +tench, Tinca tinca, +goldfish, Carassius auratus, +great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias, +tiger shark, Galeocerdo cuvieri, +hammerhead, hammerhead shark, +electric ray, crampfish, numbfish, torpedo, +stingray, +cock, +hen, +ostrich, Struthio camelus, +brambling, Fringilla montifringilla, +goldfinch, Carduelis carduelis, +house finch, linnet, Carpodacus mexicanus, +junco, snowbird, +indigo bunting, indigo finch, indigo bird, Passerina cyanea, +robin, American robin, Turdus migratorius, +bulbul, +jay, +magpie, +chickadee, +water ouzel, dipper, +kite, +bald eagle, American eagle, Haliaeetus leucocephalus, +vulture, +great grey owl, great gray owl, Strix nebulosa, +European fire salamander, Salamandra salamandra, +common newt, Triturus vulgaris, +eft, +spotted salamander, Ambystoma maculatum, +axolotl, mud puppy, Ambystoma mexicanum, +bullfrog, Rana catesbeiana, +tree frog, tree-frog, +tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui, +loggerhead, loggerhead turtle, Caretta caretta, +leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea, +mud turtle, +terrapin, +box turtle, box tortoise, +banded gecko, +common iguana, iguana, Iguana iguana, +American chameleon, anole, Anolis carolinensis, +whiptail, whiptail lizard, +agama, +frilled lizard, Chlamydosaurus kingi, +alligator lizard, +Gila monster, Heloderma suspectum, +green lizard, Lacerta viridis, +African chameleon, Chamaeleo chamaeleon, +Komodo dragon, 
Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis, +African crocodile, Nile crocodile, Crocodylus niloticus, +American alligator, Alligator mississipiensis, +triceratops, +thunder snake, worm snake, Carphophis amoenus, +ringneck snake, ring-necked snake, ring snake, +hognose snake, puff adder, sand viper, +green snake, grass snake, +king snake, kingsnake, +garter snake, grass snake, +water snake, +vine snake, +night snake, Hypsiglena torquata, +boa constrictor, Constrictor constrictor, +rock python, rock snake, Python sebae, +Indian cobra, Naja naja, +green mamba, +sea snake, +horned viper, cerastes, sand viper, horned asp, Cerastes cornutus, +diamondback, diamondback rattlesnake, Crotalus adamanteus, +sidewinder, horned rattlesnake, Crotalus cerastes, +trilobite, +harvestman, daddy longlegs, Phalangium opilio, +scorpion, +black and gold garden spider, Argiope aurantia, +barn spider, Araneus cavaticus, +garden spider, Aranea diademata, +black widow, Latrodectus mactans, +tarantula, +wolf spider, hunting spider, +tick, +centipede, +black grouse, +ptarmigan, +ruffed grouse, partridge, Bonasa umbellus, +prairie chicken, prairie grouse, prairie fowl, +peacock, +quail, +partridge, +African grey, African gray, Psittacus erithacus, +macaw, +sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita, +lorikeet, +coucal, +bee eater, +hornbill, +hummingbird, +jacamar, +toucan, +drake, +red-breasted merganser, Mergus serrator, +goose, +black swan, Cygnus atratus, +tusker, +echidna, spiny anteater, anteater, +platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus, +wallaby, brush kangaroo, +koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus, +wombat, +jellyfish, +sea anemone, anemone, +brain coral, +flatworm, platyhelminth, +nematode, nematode worm, roundworm, +conch, +snail, +slug, +sea slug, nudibranch, +chiton, coat-of-mail shell, sea cradle, polyplacophore, +chambered nautilus, pearly nautilus, nautilus, 
+Dungeness crab, Cancer magister, +rock crab, Cancer irroratus, +fiddler crab, +king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica, +American lobster, Northern lobster, Maine lobster, Homarus americanus, +spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish, +crayfish, crawfish, crawdad, crawdaddy, +hermit crab, +isopod, +white stork, Ciconia ciconia, +black stork, Ciconia nigra, +spoonbill, +flamingo, +little blue heron, Egretta caerulea, +American egret, great white heron, Egretta albus, +bittern, +crane, +limpkin, Aramus pictus, +European gallinule, Porphyrio porphyrio, +American coot, marsh hen, mud hen, water hen, Fulica americana, +bustard, +ruddy turnstone, Arenaria interpres, +red-backed sandpiper, dunlin, Erolia alpina, +redshank, Tringa totanus, +dowitcher, +oystercatcher, oyster catcher, +pelican, +king penguin, Aptenodytes patagonica, +albatross, mollymawk, +grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus, +killer whale, killer, orca, grampus, sea wolf, Orcinus orca, +dugong, Dugong dugon, +sea lion, +Chihuahua, +Japanese spaniel, +Maltese dog, Maltese terrier, Maltese, +Pekinese, Pekingese, Peke, +Shih-Tzu, +Blenheim spaniel, +papillon, +toy terrier, +Rhodesian ridgeback, +Afghan hound, Afghan, +basset, basset hound, +beagle, +bloodhound, sleuthhound, +bluetick, +black-and-tan coonhound, +Walker hound, Walker foxhound, +English foxhound, +redbone, +borzoi, Russian wolfhound, +Irish wolfhound, +Italian greyhound, +whippet, +Ibizan hound, Ibizan Podenco, +Norwegian elkhound, elkhound, +otterhound, otter hound, +Saluki, gazelle hound, +Scottish deerhound, deerhound, +Weimaraner, +Staffordshire bullterrier, Staffordshire bull terrier, +American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier, +Bedlington terrier, +Border terrier, +Kerry blue terrier, +Irish terrier, +Norfolk terrier, +Norwich terrier, +Yorkshire terrier, 
+wire-haired fox terrier, +Lakeland terrier, +Sealyham terrier, Sealyham, +Airedale, Airedale terrier, +cairn, cairn terrier, +Australian terrier, +Dandie Dinmont, Dandie Dinmont terrier, +Boston bull, Boston terrier, +miniature schnauzer, +giant schnauzer, +standard schnauzer, +Scotch terrier, Scottish terrier, Scottie, +Tibetan terrier, chrysanthemum dog, +silky terrier, Sydney silky, +soft-coated wheaten terrier, +West Highland white terrier, +Lhasa, Lhasa apso, +flat-coated retriever, +curly-coated retriever, +golden retriever, +Labrador retriever, +Chesapeake Bay retriever, +German short-haired pointer, +vizsla, Hungarian pointer, +English setter, +Irish setter, red setter, +Gordon setter, +Brittany spaniel, +clumber, clumber spaniel, +English springer, English springer spaniel, +Welsh springer spaniel, +cocker spaniel, English cocker spaniel, cocker, +Sussex spaniel, +Irish water spaniel, +kuvasz, +schipperke, +groenendael, +malinois, +briard, +kelpie, +komondor, +Old English sheepdog, bobtail, +Shetland sheepdog, Shetland sheep dog, Shetland, +collie, +Border collie, +Bouvier des Flandres, Bouviers des Flandres, +Rottweiler, +German shepherd, German shepherd dog, German police dog, alsatian, +Doberman, Doberman pinscher, +miniature pinscher, +Greater Swiss Mountain dog, +Bernese mountain dog, +Appenzeller, +EntleBucher, +boxer, +bull mastiff, +Tibetan mastiff, +French bulldog, +Great Dane, +Saint Bernard, St Bernard, +Eskimo dog, husky, +malamute, malemute, Alaskan malamute, +Siberian husky, +dalmatian, coach dog, carriage dog, +affenpinscher, monkey pinscher, monkey dog, +basenji, +pug, pug-dog, +Leonberg, +Newfoundland, Newfoundland dog, +Great Pyrenees, +Samoyed, Samoyede, +Pomeranian, +chow, chow chow, +keeshond, +Brabancon griffon, +Pembroke, Pembroke Welsh corgi, +Cardigan, Cardigan Welsh corgi, +toy poodle, +miniature poodle, +standard poodle, +Mexican hairless, +timber wolf, grey wolf, gray wolf, Canis lupus, +white wolf, Arctic wolf, Canis lupus 
tundrarum, +red wolf, maned wolf, Canis rufus, Canis niger, +coyote, prairie wolf, brush wolf, Canis latrans, +dingo, warrigal, warragal, Canis dingo, +dhole, Cuon alpinus, +African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus, +hyena, hyaena, +red fox, Vulpes vulpes, +kit fox, Vulpes macrotis, +Arctic fox, white fox, Alopex lagopus, +grey fox, gray fox, Urocyon cinereoargenteus, +tabby, tabby cat, +tiger cat, +Persian cat, +Siamese cat, Siamese, +Egyptian cat, +cougar, puma, catamount, mountain lion, painter, panther, Felis concolor, +lynx, catamount, +leopard, Panthera pardus, +snow leopard, ounce, Panthera uncia, +jaguar, panther, Panthera onca, Felis onca, +lion, king of beasts, Panthera leo, +tiger, Panthera tigris, +cheetah, chetah, Acinonyx jubatus, +brown bear, bruin, Ursus arctos, +American black bear, black bear, Ursus americanus, Euarctos americanus, +ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus, +sloth bear, Melursus ursinus, Ursus ursinus, +mongoose, +meerkat, mierkat, +tiger beetle, +ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle, +ground beetle, carabid beetle, +long-horned beetle, longicorn, longicorn beetle, +leaf beetle, chrysomelid, +dung beetle, +rhinoceros beetle, +weevil, +fly, +bee, +ant, emmet, pismire, +grasshopper, hopper, +cricket, +walking stick, walkingstick, stick insect, +cockroach, roach, +mantis, mantid, +cicada, cicala, +leafhopper, +lacewing, lacewing fly, +"dragonfly, darning needle, devils darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", +damselfly, +admiral, +ringlet, ringlet butterfly, +monarch, monarch butterfly, milkweed butterfly, Danaus plexippus, +cabbage butterfly, +sulphur butterfly, sulfur butterfly, +lycaenid, lycaenid butterfly, +starfish, sea star, +sea urchin, +sea cucumber, holothurian, +wood rabbit, cottontail, cottontail rabbit, +hare, +Angora, Angora rabbit, +hamster, +porcupine, hedgehog, +fox squirrel, eastern fox squirrel, Sciurus 
niger, +marmot, +beaver, +guinea pig, Cavia cobaya, +sorrel, +zebra, +hog, pig, grunter, squealer, Sus scrofa, +wild boar, boar, Sus scrofa, +warthog, +hippopotamus, hippo, river horse, Hippopotamus amphibius, +ox, +water buffalo, water ox, Asiatic buffalo, Bubalus bubalis, +bison, +ram, tup, +bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis, +ibex, Capra ibex, +hartebeest, +impala, Aepyceros melampus, +gazelle, +Arabian camel, dromedary, Camelus dromedarius, +llama, +weasel, +mink, +polecat, fitch, foulmart, foumart, Mustela putorius, +black-footed ferret, ferret, Mustela nigripes, +otter, +skunk, polecat, wood pussy, +badger, +armadillo, +three-toed sloth, ai, Bradypus tridactylus, +orangutan, orang, orangutang, Pongo pygmaeus, +gorilla, Gorilla gorilla, +chimpanzee, chimp, Pan troglodytes, +gibbon, Hylobates lar, +siamang, Hylobates syndactylus, Symphalangus syndactylus, +guenon, guenon monkey, +patas, hussar monkey, Erythrocebus patas, +baboon, +macaque, +langur, +colobus, colobus monkey, +proboscis monkey, Nasalis larvatus, +marmoset, +capuchin, ringtail, Cebus capucinus, +howler monkey, howler, +titi, titi monkey, +spider monkey, Ateles geoffroyi, +squirrel monkey, Saimiri sciureus, +Madagascar cat, ring-tailed lemur, Lemur catta, +indri, indris, Indri indri, Indri brevicaudatus, +Indian elephant, Elephas maximus, +African elephant, Loxodonta africana, +lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens, +giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca, +barracouta, snoek, +eel, +coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch, +rock beauty, Holocanthus tricolor, +anemone fish, +sturgeon, +gar, garfish, garpike, billfish, Lepisosteus osseus, +lionfish, +puffer, pufferfish, blowfish, globefish, +abacus, +abaya, +"academic gown, academic robe, judges robe", +accordion, piano accordion, squeeze box, +acoustic guitar, +aircraft carrier, carrier, flattop, 
attack aircraft carrier, +airliner, +airship, dirigible, +altar, +ambulance, +amphibian, amphibious vehicle, +analog clock, +apiary, bee house, +apron, +ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin, +assault rifle, assault gun, +backpack, back pack, knapsack, packsack, rucksack, haversack, +bakery, bakeshop, bakehouse, +balance beam, beam, +balloon, +ballpoint, ballpoint pen, ballpen, Biro, +Band Aid, +banjo, +bannister, banister, balustrade, balusters, handrail, +barbell, +barber chair, +barbershop, +barn, +barometer, +barrel, cask, +barrow, garden cart, lawn cart, wheelbarrow, +baseball, +basketball, +bassinet, +bassoon, +bathing cap, swimming cap, +bath towel, +bathtub, bathing tub, bath, tub, +beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon, +beacon, lighthouse, beacon light, pharos, +beaker, +bearskin, busby, shako, +beer bottle, +beer glass, +bell cote, bell cot, +bib, +bicycle-built-for-two, tandem bicycle, tandem, +bikini, two-piece, +binder, ring-binder, +binoculars, field glasses, opera glasses, +birdhouse, +boathouse, +bobsled, bobsleigh, bob, +bolo tie, bolo, bola tie, bola, +bonnet, poke bonnet, +bookcase, +bookshop, bookstore, bookstall, +bottlecap, +bow, +bow tie, bow-tie, bowtie, +brass, memorial tablet, plaque, +brassiere, bra, bandeau, +breakwater, groin, groyne, mole, bulwark, seawall, jetty, +breastplate, aegis, egis, +broom, +bucket, pail, +buckle, +bulletproof vest, +bullet train, bullet, +butcher shop, meat market, +cab, hack, taxi, taxicab, +caldron, cauldron, +candle, taper, wax light, +cannon, +canoe, +can opener, tin opener, +cardigan, +car mirror, +carousel, carrousel, merry-go-round, roundabout, whirligig, +"carpenters kit, tool kit", +carton, +car wheel, +cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM, +cassette, +cassette player, +castle, +catamaran, +CD player, +cello, 
violoncello, +cellular telephone, cellular phone, cellphone, cell, mobile phone, +chain, +chainlink fence, +chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour, +chain saw, chainsaw, +chest, +chiffonier, commode, +chime, bell, gong, +china cabinet, china closet, +Christmas stocking, +church, church building, +cinema, movie theater, movie theatre, movie house, picture palace, +cleaver, meat cleaver, chopper, +cliff dwelling, +cloak, +clog, geta, patten, sabot, +cocktail shaker, +coffee mug, +coffeepot, +coil, spiral, volute, whorl, helix, +combination lock, +computer keyboard, keypad, +confectionery, confectionary, candy store, +container ship, containership, container vessel, +convertible, +corkscrew, bottle screw, +cornet, horn, trumpet, trump, +cowboy boot, +cowboy hat, ten-gallon hat, +cradle, +crane, +crash helmet, +crate, +crib, cot, +Crock Pot, +croquet ball, +crutch, +cuirass, +dam, dike, dyke, +desk, +desktop computer, +dial telephone, dial phone, +diaper, nappy, napkin, +digital clock, +digital watch, +dining table, board, +dishrag, dishcloth, +dishwasher, dish washer, dishwashing machine, +disk brake, disc brake, +dock, dockage, docking facility, +dogsled, dog sled, dog sleigh, +dome, +doormat, welcome mat, +drilling platform, offshore rig, +drum, membranophone, tympan, +drumstick, +dumbbell, +Dutch oven, +electric fan, blower, +electric guitar, +electric locomotive, +entertainment center, +envelope, +espresso maker, +face powder, +feather boa, boa, +file, file cabinet, filing cabinet, +fireboat, +fire engine, fire truck, +fire screen, fireguard, +flagpole, flagstaff, +flute, transverse flute, +folding chair, +football helmet, +forklift, +fountain, +fountain pen, +four-poster, +freight car, +French horn, horn, +frying pan, frypan, skillet, +fur coat, +garbage truck, dustcart, +gasmask, respirator, gas helmet, +gas pump, gasoline pump, petrol pump, island dispenser, +goblet, +go-kart, +golf ball, +golfcart, golf cart, +gondola, 
+gong, tam-tam, +gown, +grand piano, grand, +greenhouse, nursery, glasshouse, +grille, radiator grille, +grocery store, grocery, food market, market, +guillotine, +hair slide, +hair spray, +half track, +hammer, +hamper, +hand blower, blow dryer, blow drier, hair dryer, hair drier, +hand-held computer, hand-held microcomputer, +handkerchief, hankie, hanky, hankey, +hard disc, hard disk, fixed disk, +harmonica, mouth organ, harp, mouth harp, +harp, +harvester, reaper, +hatchet, +holster, +home theater, home theatre, +honeycomb, +hook, claw, +hoopskirt, crinoline, +horizontal bar, high bar, +horse cart, horse-cart, +hourglass, +iPod, +iron, smoothing iron, +"jack-o-lantern", +jean, blue jean, denim, +jeep, landrover, +jersey, T-shirt, tee shirt, +jigsaw puzzle, +jinrikisha, ricksha, rickshaw, +joystick, +kimono, +knee pad, +knot, +lab coat, laboratory coat, +ladle, +lampshade, lamp shade, +laptop, laptop computer, +lawn mower, mower, +lens cap, lens cover, +letter opener, paper knife, paperknife, +library, +lifeboat, +lighter, light, igniter, ignitor, +limousine, limo, +liner, ocean liner, +lipstick, lip rouge, +Loafer, +lotion, +loudspeaker, speaker, speaker unit, loudspeaker system, speaker system, +"loupe, jewelers loupe", +lumbermill, sawmill, +magnetic compass, +mailbag, postbag, +mailbox, letter box, +maillot, +maillot, tank suit, +manhole cover, +maraca, +marimba, xylophone, +mask, +matchstick, +maypole, +maze, labyrinth, +measuring cup, +medicine chest, medicine cabinet, +megalith, megalithic structure, +microphone, mike, +microwave, microwave oven, +military uniform, +milk can, +minibus, +miniskirt, mini, +minivan, +missile, +mitten, +mixing bowl, +mobile home, manufactured home, +Model T, +modem, +monastery, +monitor, +moped, +mortar, +mortarboard, +mosque, +mosquito net, +motor scooter, scooter, +mountain bike, all-terrain bike, off-roader, +mountain tent, +mouse, computer mouse, +mousetrap, +moving van, +muzzle, +nail, +neck brace, +necklace, +nipple, 
+notebook, notebook computer, +obelisk, +oboe, hautboy, hautbois, +ocarina, sweet potato, +odometer, hodometer, mileometer, milometer, +oil filter, +organ, pipe organ, +oscilloscope, scope, cathode-ray oscilloscope, CRO, +overskirt, +oxcart, +oxygen mask, +packet, +paddle, boat paddle, +paddlewheel, paddle wheel, +padlock, +paintbrush, +"pajama, pyjama, pjs, jammies", +palace, +panpipe, pandean pipe, syrinx, +paper towel, +parachute, chute, +parallel bars, bars, +park bench, +parking meter, +passenger car, coach, carriage, +patio, terrace, +pay-phone, pay-station, +pedestal, plinth, footstall, +pencil box, pencil case, +pencil sharpener, +perfume, essence, +Petri dish, +photocopier, +pick, plectrum, plectron, +pickelhaube, +picket fence, paling, +pickup, pickup truck, +pier, +piggy bank, penny bank, +pill bottle, +pillow, +ping-pong ball, +pinwheel, +pirate, pirate ship, +pitcher, ewer, +"plane, carpenters plane, woodworking plane", +planetarium, +plastic bag, +plate rack, +plow, plough, +"plunger, plumbers helper", +Polaroid camera, Polaroid Land camera, +pole, +police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria, +poncho, +pool table, billiard table, snooker table, +pop bottle, soda bottle, +pot, flowerpot, +"potters wheel", +power drill, +prayer rug, prayer mat, +printer, +prison, prison house, +projectile, missile, +projector, +puck, hockey puck, +punching bag, punch bag, punching ball, punchball, +purse, +quill, quill pen, +quilt, comforter, comfort, puff, +racer, race car, racing car, +racket, racquet, +radiator, +radio, wireless, +radio telescope, radio reflector, +rain barrel, +recreational vehicle, RV, R.V., +reel, +reflex camera, +refrigerator, icebox, +remote control, remote, +restaurant, eating house, eating place, eatery, +revolver, six-gun, six-shooter, +rifle, +rocking chair, rocker, +rotisserie, +rubber eraser, rubber, pencil eraser, +rugby ball, +rule, ruler, +running shoe, +safe, +safety pin, +saltshaker, salt shaker, +sandal, 
+sarong, +sax, saxophone, +scabbard, +scale, weighing machine, +school bus, +schooner, +scoreboard, +screen, CRT screen, +screw, +screwdriver, +seat belt, seatbelt, +sewing machine, +shield, buckler, +shoe shop, shoe-shop, shoe store, +shoji, +shopping basket, +shopping cart, +shovel, +shower cap, +shower curtain, +ski, +ski mask, +sleeping bag, +slide rule, slipstick, +sliding door, +slot, one-armed bandit, +snorkel, +snowmobile, +snowplow, snowplough, +soap dispenser, +soccer ball, +sock, +solar dish, solar collector, solar furnace, +sombrero, +soup bowl, +space bar, +space heater, +space shuttle, +spatula, +speedboat, +"spider web, spiders web", +spindle, +sports car, sport car, +spotlight, spot, +stage, +steam locomotive, +steel arch bridge, +steel drum, +stethoscope, +stole, +stone wall, +stopwatch, stop watch, +stove, +strainer, +streetcar, tram, tramcar, trolley, trolley car, +stretcher, +studio couch, day bed, +stupa, tope, +submarine, pigboat, sub, U-boat, +suit, suit of clothes, +sundial, +sunglass, +sunglasses, dark glasses, shades, +sunscreen, sunblock, sun blocker, +suspension bridge, +swab, swob, mop, +sweatshirt, +swimming trunks, bathing trunks, +swing, +switch, electric switch, electrical switch, +syringe, +table lamp, +tank, army tank, armored combat vehicle, armoured combat vehicle, +tape player, +teapot, +teddy, teddy bear, +television, television system, +tennis ball, +thatch, thatched roof, +theater curtain, theatre curtain, +thimble, +thresher, thrasher, threshing machine, +throne, +tile roof, +toaster, +tobacco shop, tobacconist shop, tobacconist, +toilet seat, +torch, +totem pole, +tow truck, tow car, wrecker, +toyshop, +tractor, +trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi, +tray, +trench coat, +tricycle, trike, velocipede, +trimaran, +tripod, +triumphal arch, +trolleybus, trolley coach, trackless trolley, +trombone, +tub, vat, +turnstile, +typewriter keyboard, +umbrella, +unicycle, monocycle, +upright, 
upright piano, +vacuum, vacuum cleaner, +vase, +vault, +velvet, +vending machine, +vestment, +viaduct, +violin, fiddle, +volleyball, +waffle iron, +wall clock, +wallet, billfold, notecase, pocketbook, +wardrobe, closet, press, +warplane, military plane, +washbasin, handbasin, washbowl, lavabo, wash-hand basin, +washer, automatic washer, washing machine, +water bottle, +water jug, +water tower, +whiskey jug, +whistle, +wig, +window screen, +window shade, +Windsor tie, +wine bottle, +wing, +wok, +wooden spoon, +wool, woolen, woollen, +worm fence, snake fence, snake-rail fence, Virginia fence, +wreck, +yawl, +yurt, +web site, website, internet site, site, +comic book, +crossword puzzle, crossword, +street sign, +traffic light, traffic signal, stoplight, +book jacket, dust cover, dust jacket, dust wrapper, +menu, +plate, +guacamole, +consomme, +hot pot, hotpot, +trifle, +ice cream, icecream, +ice lolly, lolly, lollipop, popsicle, +French loaf, +bagel, beigel, +pretzel, +cheeseburger, +hotdog, hot dog, red hot, +mashed potato, +head cabbage, +broccoli, +cauliflower, +zucchini, courgette, +spaghetti squash, +acorn squash, +butternut squash, +cucumber, cuke, +artichoke, globe artichoke, +bell pepper, +cardoon, +mushroom, +Granny Smith, +strawberry, +orange, +lemon, +fig, +pineapple, ananas, +banana, +jackfruit, jak, jack, +custard apple, +pomegranate, +hay, +carbonara, +chocolate sauce, chocolate syrup, +dough, +meat loaf, meatloaf, +pizza, pizza pie, +potpie, +burrito, +red wine, +espresso, +cup, +eggnog, +alp, +bubble, +cliff, drop, drop-off, +coral reef, +geyser, +lakeside, lakeshore, +promontory, headland, head, foreland, +sandbar, sand bar, +seashore, coast, seacoast, sea-coast, +valley, vale, +volcano, +ballplayer, baseball player, +groom, bridegroom, +scuba diver, +rapeseed, +daisy, +"yellow ladys slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", +corn, +acorn, +hip, rose hip, rosehip, +buckeye, horse chestnut, conker, +coral fungus, 
+agaric, +gyromitra, +stinkhorn, carrion fungus, +earthstar, +hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa, +bolete, +ear, spike, capitulum, +toilet tissue, toilet paper, bathroom tissue diff --git a/deploy/paddleserving/imgs/results.png b/deploy/paddleserving/imgs/results.png new file mode 100644 index 0000000000000000000000000000000000000000..4d6db757a19cb0355ca8e8e8675a6d5d7671b022 Binary files /dev/null and b/deploy/paddleserving/imgs/results.png differ diff --git a/deploy/paddleserving/imgs/start_server.png b/deploy/paddleserving/imgs/start_server.png new file mode 100644 index 0000000000000000000000000000000000000000..8294e19b63ee3908f887d4a7d85bc421d360a371 Binary files /dev/null and b/deploy/paddleserving/imgs/start_server.png differ diff --git a/deploy/paddleserving/pipeline_http_client.py b/deploy/paddleserving/pipeline_http_client.py new file mode 100644 index 0000000000000000000000000000000000000000..49b3ce00615e946faaf5b57736a9edc5889ac3bd --- /dev/null +++ b/deploy/paddleserving/pipeline_http_client.py @@ -0,0 +1,17 @@ +import requests +import json +import base64 +import os + +def cv2_to_base64(image): + return base64.b64encode(image).decode('utf8') + +if __name__ == "__main__": + url = "http://127.0.0.1:18080/imagenet/prediction" + with open(os.path.join(".", "daisy.jpg"), 'rb') as file: + image_data1 = file.read() + image = cv2_to_base64(image_data1) + data = {"key": ["image"], "value": [image]} + for i in range(100): + r = requests.post(url=url, data=json.dumps(data)) + print(r.json()) diff --git a/deploy/paddleserving/pipeline_rpc_client.py b/deploy/paddleserving/pipeline_rpc_client.py new file mode 100644 index 0000000000000000000000000000000000000000..75bcae0dd4d2088e01de96d07df2b67c8245a524 --- /dev/null +++ b/deploy/paddleserving/pipeline_rpc_client.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +try: + from paddle_serving_server_gpu.pipeline import PipelineClient +except ImportError: + from paddle_serving_server.pipeline import PipelineClient +import base64 + +client = PipelineClient() +client.connect(['127.0.0.1:9993']) + +def cv2_to_base64(image): + return base64.b64encode(image).decode('utf8') + +if __name__ == "__main__": + with open("daisy.jpg", 'rb') as file: + image_data = file.read() + image = cv2_to_base64(image_data) + + for i in range(1): + ret = client.predict(feed_dict={"image": image}, fetch=["label", "prob"]) + print(ret) diff --git a/deploy/paddleserving/utils.py b/deploy/paddleserving/utils.py deleted file mode 100644 index 6c4a75e1afe2fc1e1710c7e8213f8ac4de8ffcc2..0000000000000000000000000000000000000000 --- a/deploy/paddleserving/utils.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import cv2 -import numpy as np - - -class DecodeImage(object): - def __init__(self, to_rgb=True): - self.to_rgb = to_rgb - - def __call__(self, img): - data = np.frombuffer(img, dtype='uint8') - img = cv2.imdecode(data, 1) - if self.to_rgb: - assert img.shape[2] == 3, 'invalid shape of image[%s]' % ( - img.shape) - img = img[:, :, ::-1] - - return img - - -class ResizeImage(object): - def __init__(self, resize_short=None): - self.resize_short = resize_short - - def __call__(self, img): - img_h, img_w = img.shape[:2] - percent = float(self.resize_short) / min(img_w, img_h) - w = int(round(img_w * percent)) - h = int(round(img_h * percent)) - return cv2.resize(img, (w, h)) - - -class CropImage(object): - def __init__(self, size): - if type(size) is int: - self.size = (size, size) - else: - self.size = size - - def __call__(self, img): - w, h = self.size - img_h, img_w = img.shape[:2] - w_start = (img_w - w) // 2 - h_start = (img_h - h) // 2 - - w_end = w_start + w - h_end = h_start + h - return img[h_start:h_end, w_start:w_end, :] - - -class NormalizeImage(object): - def __init__(self, scale=None, mean=None, std=None): - self.scale = np.float32(scale if scale is not None else 1.0 / 255.0) - mean = mean if mean is not None else [0.485, 0.456, 0.406] - std = std if std is not None else [0.229, 0.224, 0.225] - - shape = (1, 1, 3) - self.mean = np.array(mean).reshape(shape).astype('float32') - self.std = np.array(std).reshape(shape).astype('float32') - - def __call__(self, img): - return (img.astype('float32') * self.scale - self.mean) / self.std - - -class ToTensor(object): - def __init__(self): - pass - - def __call__(self, img): - img = img.transpose((2, 0, 1)) - return img diff --git a/docs/en/tutorials/quick_start_recognition_en.md b/docs/en/tutorials/quick_start_recognition_en.md index cd14d2bb79e383dc72a558fc8b4a3f5408f6fa5e..4d82cd2afba91fc57e632948e1697a039ecba107 100644 --- a/docs/en/tutorials/quick_start_recognition_en.md +++ 
b/docs/en/tutorials/quick_start_recognition_en.md @@ -90,13 +90,13 @@ wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/infere cd .. # Download the demo data and unzip it -wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/recognition_demo_data_en_v1.0.tar && tar -xf recognition_demo_data_en_v1.0.tar +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/recognition_demo_data_en_v1.1.tar && tar -xf recognition_demo_data_en_v1.1.tar ``` -Once unpacked, the `recognition_demo_data_v1.0` folder should have the following file structure. +Once unpacked, the `recognition_demo_data_v1.1` folder should have the following file structure. ``` -├── recognition_demo_data_v1.0 +├── recognition_demo_data_v1.1 │ ├── gallery_cartoon │ ├── gallery_logo │ ├── gallery_product @@ -126,13 +126,21 @@ The `models` folder should have the following file structure. ### 2.2 Product Recognition and Retrieval -Take the product recognition demo as an example to show the recognition and retrieval process (if you wish to try other scenarios of recognition and retrieval, replace the corresponding configuration file after downloading and unzipping the corresponding demo data and model to complete the prediction)。 +Take the product recognition demo as an example to show the recognition and retrieval process (if you wish to try other scenarios of recognition and retrieval, replace the corresponding configuration file after downloading and unzipping the corresponding demo data and model to complete the prediction). +**Note:** `faiss` is used as search library. The installation method is as follows: + +``` +pip install faiss-cpu==1.7.1post2 +``` + +If error happens when using `import faiss`, please uninstall `faiss` and reinstall it, especially on `Windows`. 
+ #### 2.2.1 Single Image Recognition -Run the following command to identify and retrieve the image `./recognition_demo_data_v1.0/test_product/daoxiangcunjinzhubing_6.jpg` for recognition and retrieval +Run the following command to identify and retrieve the image `./recognition_demo_data_v1.1/test_product/daoxiangcunjinzhubing_6.jpg` for recognition and retrieval ```shell # use the following command to predict using GPU. @@ -141,8 +149,6 @@ python3.7 python/predict_system.py -c configs/inference_product.yaml python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.use_gpu=False ``` -**Note:** Program lib used to build index is compliled on our machine, if error occured because of the environment, you can refer to [vector search tutorial](../../../deploy/vector_search/README.md) to rebuild the lib. - The image to be retrieved is shown below. @@ -175,7 +181,7 @@ If you want to predict the images in the folder, you can directly modify the `Gl ```shell # using the following command to predict using GPU, you can append `-o Global.use_gpu=False` to predict using CPU. -python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.0/test_product/" +python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.1/test_product/" ``` @@ -195,16 +201,16 @@ The results on the screen are shown as following. All the visualization results are also saved in folder `output`. -Furthermore, the recognition inference model path can be changed by modifying the `Global.rec_inference_model_dir` field, and the path of the index to the index databass can be changed by modifying the `IndexProcess.index_path` field. +Furthermore, the recognition inference model path can be changed by modifying the `Global.rec_inference_model_dir` field, and the path of the index to the index databass can be changed by modifying the `IndexProcess.index_dir` field. ## 3. 
Recognize Images of Unknown Category -To recognize the image `./recognition_demo_data_v1.0/test_product/anmuxi.jpg`, run the command as follows: +To recognize the image `./recognition_demo_data_v1.1/test_product/anmuxi.jpg`, run the command as follows: ```shell -python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.0/test_product/anmuxi.jpg" +python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.1/test_product/anmuxi.jpg" ``` The image to be retrieved is shown below. @@ -225,14 +231,14 @@ When the index database cannot cover the scenes we actually recognise, i.e. when First, you need to copy the images which are similar with the image to retrieval to the original images for the index database. The command is as follows. ```shell -cp -r ../docs/images/recognition/product_demo/gallery/anmuxi ./recognition_demo_data_v1.0/gallery_product/gallery/ +cp -r ../docs/images/recognition/product_demo/gallery/anmuxi ./recognition_demo_data_/gallery_product/gallery/ ``` Then you need to create a new label file which records the image path and label information. Use the following command to create a new file based on the original one. ```shell # copy the file -cp recognition_demo_data_v1.0/gallery_product/data_file.txt recognition_demo_data_v1.0/gallery_product/data_file_update.txt +cp recognition_demo_data_v1.1/gallery_product/data_file.txt recognition_demo_data_v1.1/gallery_product/data_file_update.txt ``` Then add some new lines into the new label file, which is shown as follows. @@ -255,20 +261,20 @@ Each line can be splited into two fields. The first field denotes the relative i Use the following command to build the index to accelerate the retrieval process after recognition. 
```shell -python3.7 python/build_gallery.py -c configs/build_product.yaml -o IndexProcess.data_file="./recognition_demo_data_v1.0/gallery_product/data_file_update.txt" -o IndexProcess.index_path="./recognition_demo_data_v1.0/gallery_product/index_update" +python3.7 python/build_gallery.py -c configs/build_product.yaml -o IndexProcess.data_file="./recognition_demo_data_v1.1/gallery_product/data_file_update.txt" -o IndexProcess.index_dir="./recognition_demo_data_v1.1/gallery_product/index_update" ``` -Finally, the new index information is stored in the folder`./recognition_demo_data_v1.0/gallery_product/index_update`. Use the new index database for the above index. +Finally, the new index information is stored in the folder`./recognition_demo_data_v1.1/gallery_product/index_update`. Use the new index database for the above index. ### 3.2 Recognize the Unknown Category Images -To recognize the image `./recognition_demo_data_v1.0/test_product/anmuxi.jpg`, run the command as follows. +To recognize the image `./recognition_demo_data_v1.1/test_product/anmuxi.jpg`, run the command as follows. ```shell # using the following command to predict using GPU, you can append `-o Global.use_gpu=False` to predict using CPU. 
-python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.0/test_product/anmuxi.jpg" -o IndexProcess.index_path="./recognition_demo_data_v1.0/gallery_product/index_update" +python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.1/test_product/anmuxi.jpg" -o IndexProcess.index_dir="./recognition_demo_data_v1.1/gallery_product/index_update" ``` The output is as follows: diff --git a/docs/images/ml_illustration.jpg b/docs/images/ml_illustration.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69dced96bcbc9f40ac08684301d20691ea07e93d Binary files /dev/null and b/docs/images/ml_illustration.jpg differ diff --git a/docs/images/ml_pipeline.jpg b/docs/images/ml_pipeline.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cac6508924dcbe964244727c56239eb064c30ba1 Binary files /dev/null and b/docs/images/ml_pipeline.jpg differ diff --git a/docs/zh_CN/tutorials/quick_start_recognition.md b/docs/zh_CN/tutorials/quick_start_recognition.md index 2e2fab8316a307aac105f669675270b2425f4c79..19b8fed91925935a21f279ef7833867a70b8ebf2 100644 --- a/docs/zh_CN/tutorials/quick_start_recognition.md +++ b/docs/zh_CN/tutorials/quick_start_recognition.md @@ -46,8 +46,8 @@ 本章节demo数据下载地址如下: [数据下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/recognition_demo_data_v1.1.tar)。 - **注意** + 1. windows 环境下如果没有安装wget,可以按照下面的步骤安装wget与tar命令,也可以在,下载模型时将链接复制到浏览器中下载,并解压放置在相应目录下;linux或者macOS用户可以右键点击,然后复制下载链接,即可通过`wget`命令下载。 2. 如果macOS环境下没有安装`wget`命令,可以运行下面的命令进行安装。 @@ -74,8 +74,8 @@ cd .. 
wget {数据下载链接地址} && tar -xf {压缩包的名称} ``` - + ### 2.1 下载、解压inference 模型与demo数据 以商品识别为例,下载demo数据集以及通用检测、识别模型,命令如下。 @@ -90,13 +90,13 @@ wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/infere cd ../ # 下载demo数据并解压 -wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/recognition_demo_data_v1.0.tar && tar -xf recognition_demo_data_v1.0.tar +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/recognition_demo_data_v1.1.tar && tar -xf recognition_demo_data_v1.1.tar ``` -解压完毕后,`recognition_demo_data_v1.0`文件夹下应有如下文件结构: +解压完毕后,`recognition_demo_data_v1.1`文件夹下应有如下文件结构: ``` -├── recognition_demo_data_v1.0 +├── recognition_demo_data_v1.1 │ ├── gallery_cartoon │ ├── gallery_logo │ ├── gallery_product @@ -129,11 +129,19 @@ wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/recognit 以商品识别demo为例,展示识别与检索过程(如果希望尝试其他方向的识别与检索效果,在下载解压好对应的demo数据与模型之后,替换对应的配置文件即可完成预测)。 +注意,此部分使用了`faiss`作为检索库,安装方法如下: + +```python +pip install faiss-cpu==1.7.1post2 +``` + +若使用时,不能正常引用,则`uninstall` 之后,重新`install`,尤其是windows下。 + #### 2.2.1 识别单张图像 -运行下面的命令,对图像`./recognition_demo_data_v1.0/test_product/daoxiangcunjinzhubing_6.jpg`进行识别与检索 +运行下面的命令,对图像`./recognition_demo_data_v1.1/test_product/daoxiangcunjinzhubing_6.jpg`进行识别与检索 ```shell # 使用下面的命令使用GPU进行预测 @@ -142,8 +150,6 @@ python3.7 python/predict_system.py -c configs/inference_product.yaml python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.use_gpu=False ``` -注意:这里使用了默认编译生成的库文件进行特征索引,如果与您的环境不兼容,导致程序报错,可以参考[向量检索教程](../../../deploy/vector_search/README.md)重新编译库文件。 - 待检索图像如下所示。
@@ -173,7 +179,7 @@ python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.u ```shell # 使用下面的命令使用GPU进行预测,如果希望使用CPU预测,可以在命令后面添加-o Global.use_gpu=False -python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.0/test_product/" +python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.1/test_product/" ``` 终端中会输出该文件夹内所有图像的识别结果,如下所示。 @@ -193,17 +199,17 @@ python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.i 所有图像的识别结果可视化图像也保存在`output`文件夹内。 -更多地,可以通过修改`Global.rec_inference_model_dir`字段来更改识别inference模型的路径,通过修改`IndexProcess.index_path`字段来更改索引库索引的路径。 - +更多地,可以通过修改`Global.rec_inference_model_dir`字段来更改识别inference模型的路径,通过修改`IndexProcess.index_dir`字段来更改索引库索引的路径。 + ## 3. 未知类别的图像识别体验 -对图像`./recognition_demo_data_v1.0/test_product/anmuxi.jpg`进行识别,命令如下 +对图像`./recognition_demo_data_v1.1/test_product/anmuxi.jpg`进行识别,命令如下 ```shell # 使用下面的命令使用GPU进行预测,如果希望使用CPU预测,可以在命令后面添加-o Global.use_gpu=False -python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.0/test_product/anmuxi.jpg" +python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.1/test_product/anmuxi.jpg" ``` 待检索图像如下所示。 @@ -222,20 +228,20 @@ python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.i ### 3.1 准备新的数据与标签 -首先需要将与待检索图像相似的图像列表拷贝到索引库原始图像的文件夹(`./recognition_demo_data_v1.0/gallery_product/gallery`)中,运行下面的命令拷贝相似图像。 +首先需要将与待检索图像相似的图像列表拷贝到索引库原始图像的文件夹(`./recognition_demo_data_v1.1/gallery_product/gallery`)中,运行下面的命令拷贝相似图像。 ```shell -cp -r ../docs/images/recognition/product_demo/gallery/anmuxi ./recognition_demo_data_v1.0/gallery_product/gallery/ +cp -r ../docs/images/recognition/product_demo/gallery/anmuxi ./recognition_demo_data_v1.1/gallery_product/gallery/ ``` 
-然后需要编辑记录了图像路径和标签信息的文本文件(`./recognition_demo_data_v1.0/gallery_product/data_file_update.txt`),这里基于原始标签文件,新建一个文件。命令如下。 +然后需要编辑记录了图像路径和标签信息的文本文件(`./recognition_demo_data_v1.1/gallery_product/data_file_update.txt`),这里基于原始标签文件,新建一个文件。命令如下。 ```shell # 复制文件 -cp recognition_demo_data_v1.0/gallery_product/data_file.txt recognition_demo_data_v1.0/gallery_product/data_file_update.txt +cp recognition_demo_data_v1.1/gallery_product/data_file.txt recognition_demo_data_v1.1/gallery_product/data_file_update.txt ``` -然后在文件`recognition_demo_data_v1.0/gallery_product/data_file_update.txt`中添加以下的信息, +然后在文件`recognition_demo_data_v1.1/gallery_product/data_file_update.txt`中添加以下的信息, ``` gallery/anmuxi/001.jpg 安慕希酸奶 @@ -255,20 +261,20 @@ gallery/anmuxi/006.jpg 安慕希酸奶 使用下面的命令构建index索引,加速识别后的检索过程。 ```shell -python3.7 python/build_gallery.py -c configs/build_product.yaml -o IndexProcess.data_file="./recognition_demo_data_v1.0/gallery_product/data_file_update.txt" -o IndexProcess.index_path="./recognition_demo_data_v1.0/gallery_product/index_update" +python3.7 python/build_gallery.py -c configs/build_product.yaml -o IndexProcess.data_file="./recognition_demo_data_v1.1/gallery_product/data_file_update.txt" -o IndexProcess.index_dir="./recognition_demo_data_v1.1/gallery_product/index_update" ``` -最终新的索引信息保存在文件夹`./recognition_demo_data_v1.0/gallery_product/index_update`中。 - +最终新的索引信息保存在文件夹`./recognition_demo_data_v1.1/gallery_product/index_update`中。 + ### 3.3 基于新的索引库的图像识别 使用新的索引库,对上述图像进行识别,运行命令如下。 ```shell # 使用下面的命令使用GPU进行预测,如果希望使用CPU预测,可以在命令后面添加-o Global.use_gpu=False -python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.0/test_product/anmuxi.jpg" -o IndexProcess.index_path="./recognition_demo_data_v1.0/gallery_product/index_update" +python3.7 python/predict_system.py -c configs/inference_product.yaml -o Global.infer_imgs="./recognition_demo_data_v1.1/test_product/anmuxi.jpg" -o 
IndexProcess.index_dir="./recognition_demo_data_v1.1/gallery_product/index_update" ``` 输出结果如下。 diff --git a/docs/zh_CN_tmp/algorithm_introduction/metric_learning.md b/docs/zh_CN_tmp/algorithm_introduction/metric_learning.md new file mode 100644 index 0000000000000000000000000000000000000000..789d4940b1d9da969355a3d3b27ca612429f47f3 --- /dev/null +++ b/docs/zh_CN_tmp/algorithm_introduction/metric_learning.md @@ -0,0 +1,26 @@ +# Metric Learning + +## 简介 + 在机器学习中,我们经常会遇到度量数据间距离的问题。一般来说,对于可度量的数据,我们可以直接通过欧式距离(Euclidean Distance),向量内积(Inner Product)或者是余弦相似度(Cosine Similarity)来进行计算。但对于非结构化数据来说,我们却很难进行这样的操作,如计算一段视频和一首音乐的匹配程度。由于数据格式的不同,我们难以直接进行上述的向量运算,但先验知识告诉我们ED(laugh_video, laugh_music) < ED(laugh_video, blue_music), 如何去有效得表征这种”距离”关系呢? 这就是Metric Learning所要研究的课题。 + + Metric learning全称是 Distance Metric Learning,它是通过机器学习的形式,根据训练数据,自动构造出一种基于特定任务的度量函数。Metric Learning的目标是学习一个变换函数(线性非线性均可)L,将数据点从原始的向量空间映射到一个新的向量空间,在新的向量空间里相似点的距离更近,非相似点的距离更远,使得度量更符合任务的要求,如下图所示。 Deep Metric Learning,就是用深度神经网络来拟合这个变换函数。 +![example](../../images/ml_illustration.jpg) + + +## 应用 + Metric Learning技术在生活实际中应用广泛,如我们耳熟能详的人脸识别(Face Recognition)、行人重识别(Person ReID)、图像检索(Image Retrieval)、细粒度分类(Fine-gained classification)等. 随着深度学习在工业实践中越来越广泛的应用,目前大家研究的方向基本都偏向于Deep Metric Learning(DML). + + 一般来说, DML包含三个部分: 特征提取网络来map embedding, 一个采样策略来将一个mini-batch里的样本组合成很多个sub-set, 最后loss function在每个sub-set上计算loss. 如下图所示: + ![image](../../images/ml_pipeline.jpg) + + +## 算法 + Metric Learning主要有如下两种学习范式: +### 1. Classification based: + 这是一类基于分类标签的Metric Learning方法。这类方法通过将每个样本分类到正确的类别中,来学习有效的特征表示,学习过程中需要每个样本的显式标签参与Loss计算。常见的算法有[L2-Softmax](https://arxiv.org/abs/1703.09507), [Large-margin Softmax](https://arxiv.org/abs/1612.02295), [Angular Softmax](https://arxiv.org/pdf/1704.08063.pdf), [NormFace](https://arxiv.org/abs/1704.06369), [AM-Softmax](https://arxiv.org/abs/1801.05599), [CosFace](https://arxiv.org/abs/1801.09414), [ArcFace](https://arxiv.org/abs/1801.07698)等。 + 这类方法也被称作是proxy-based, 因为其本质上优化的是样本和一堆proxies之间的相似度。 +### 2. 
Pairwise based: + 这是一类基于样本对的学习范式。他以样本对作为输入,通过直接学习样本对之间的相似度来得到有效的特征表示,常见的算法包括:[Contrastive loss](http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf), [Triplet loss](https://arxiv.org/abs/1503.03832), [Lifted-Structure loss](https://arxiv.org/abs/1511.06452), [N-pair loss](https://papers.nips.cc/paper/2016/file/6b180037abbebea991d8b1232f8a8ca9-Paper.pdf), [Multi-Similarity loss](https://arxiv.org/pdf/1904.06627.pdf)等 + +2020年发表的[CircleLoss](https://arxiv.org/abs/2002.10857),从一个全新的视角统一了两种学习范式,让研究人员和从业者对Metric Learning问题有了更进一步的思考。 + diff --git a/requirements.txt b/requirements.txt index db2e5a08c906164179d7161b1d8a898a8b897c82..0dd6753192900026fb2921b7bd1b0687bef05ad8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,4 +8,4 @@ visualdl >= 2.0.0b scipy scikit-learn==0.23.2 gast==0.3.3 -faiss-cpu==1.7.1 +faiss-cpu==1.7.1.post2 diff --git a/tests/DarkNet53.txt b/tests/config/DarkNet53.txt similarity index 100% rename from tests/DarkNet53.txt rename to tests/config/DarkNet53.txt diff --git a/tests/HRNet_W18_C.txt b/tests/config/HRNet_W18_C.txt similarity index 100% rename from tests/HRNet_W18_C.txt rename to tests/config/HRNet_W18_C.txt diff --git a/tests/LeViT_128S.txt b/tests/config/LeViT_128S.txt similarity index 100% rename from tests/LeViT_128S.txt rename to tests/config/LeViT_128S.txt diff --git a/tests/MobileNetV1.txt b/tests/config/MobileNetV1.txt similarity index 100% rename from tests/MobileNetV1.txt rename to tests/config/MobileNetV1.txt diff --git a/tests/MobileNetV2.txt b/tests/config/MobileNetV2.txt similarity index 100% rename from tests/MobileNetV2.txt rename to tests/config/MobileNetV2.txt diff --git a/tests/MobileNetV3_large_x1_0.txt b/tests/config/MobileNetV3_large_x1_0.txt similarity index 100% rename from tests/MobileNetV3_large_x1_0.txt rename to tests/config/MobileNetV3_large_x1_0.txt diff --git a/tests/ResNeXt101_vd_64x4d.txt b/tests/config/ResNeXt101_vd_64x4d.txt similarity index 100% rename from 
tests/ResNeXt101_vd_64x4d.txt rename to tests/config/ResNeXt101_vd_64x4d.txt diff --git a/tests/ResNet50_vd.txt b/tests/config/ResNet50_vd.txt similarity index 92% rename from tests/ResNet50_vd.txt rename to tests/config/ResNet50_vd.txt index da02c8894b0c13981742bf698f95450a2b3e3082..425d0c8eabbf3dfc539c791112a11c29a08d0816 100644 --- a/tests/ResNet50_vd.txt +++ b/tests/config/ResNet50_vd.txt @@ -49,3 +49,10 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o Global.save_log_path:null -o Global.benchmark:True null:null +null:null +===========================cpp_infer_params=========================== +use_gpu:0|1 +cpu_threads:1|6 +use_mkldnn:0|1 +use_tensorrt:0|1 +use_fp16:0|1 diff --git a/tests/ShuffleNetV2_x1_0.txt b/tests/config/ShuffleNetV2_x1_0.txt similarity index 100% rename from tests/ShuffleNetV2_x1_0.txt rename to tests/config/ShuffleNetV2_x1_0.txt diff --git a/tests/SwinTransformer_tiny_patch4_window7_224.txt b/tests/config/SwinTransformer_tiny_patch4_window7_224.txt similarity index 100% rename from tests/SwinTransformer_tiny_patch4_window7_224.txt rename to tests/config/SwinTransformer_tiny_patch4_window7_224.txt diff --git a/tests/config/cpp_config.txt b/tests/config/cpp_config.txt new file mode 100755 index 0000000000000000000000000000000000000000..2f128644728dd1cb34d994e6300310da0fe8aa63 --- /dev/null +++ b/tests/config/cpp_config.txt @@ -0,0 +1,19 @@ +# model load config +gpu_id 0 +gpu_mem 2000 + +# whole chain test will add following config +# use_gpu 0 +# cpu_threads 10 +# use_mkldnn 1 +# use_tensorrt 0 +# use_fp16 0 + +# cls config +cls_model_path ../inference/inference.pdmodel +cls_params_path ../inference/inference.pdiparams +resize_short_size 256 +crop_size 224 + +# for log env info +benchmark 1 diff --git a/tests/prepare.sh b/tests/prepare.sh index 55e1f2c7f0898779cfba6b91f2a0f0a789931170..35782fd01322bab4398a6b26b36c86de2b15efe7 100644 --- a/tests/prepare.sh +++ b/tests/prepare.sh @@ -33,7 +33,7 @@ if [ ${MODE} = 
"lite_train_infer" ] || [ ${MODE} = "whole_infer" ];then mv train.txt train_list.txt mv val.txt val_list.txt cd ../../ -elif [ ${MODE} = "infer" ];then +elif [ ${MODE} = "infer" ] || [ ${MODE} = "cpp_infer" ];then # download data cd dataset rm -rf ILSVRC2012 @@ -58,3 +58,63 @@ elif [ ${MODE} = "whole_train_infer" ];then mv val.txt val_list.txt cd ../../ fi + +if [ ${MODE} = "cpp_infer" ];then + cd deploy/cpp + echo "################### build opencv ###################" + rm -rf 3.4.7.tar.gz opencv-3.4.7/ + wget https://github.com/opencv/opencv/archive/3.4.7.tar.gz + tar -xf 3.4.7.tar.gz + install_path=$(pwd)/opencv-3.4.7/opencv3 + cd opencv-3.4.7/ + + rm -rf build + mkdir build + cd build + cmake .. \ + -DCMAKE_INSTALL_PREFIX=${install_path} \ + -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_SHARED_LIBS=OFF \ + -DWITH_IPP=OFF \ + -DBUILD_IPP_IW=OFF \ + -DWITH_LAPACK=OFF \ + -DWITH_EIGEN=OFF \ + -DCMAKE_INSTALL_LIBDIR=lib64 \ + -DWITH_ZLIB=ON \ + -DBUILD_ZLIB=ON \ + -DWITH_JPEG=ON \ + -DBUILD_JPEG=ON \ + -DWITH_PNG=ON \ + -DBUILD_PNG=ON \ + -DWITH_TIFF=ON \ + -DBUILD_TIFF=ON + make -j + make install + cd ../../ + echo "################### build opencv finished ###################" + + echo "################### build PaddleClas demo ####################" + OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/ + LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/ + CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`) + CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`) + + BUILD_DIR=build + rm -rf ${BUILD_DIR} + mkdir ${BUILD_DIR} + cd ${BUILD_DIR} + cmake .. 
\ + -DPADDLE_LIB=${LIB_DIR} \ + -DWITH_MKL=ON \ + -DDEMO_NAME=clas_system \ + -DWITH_GPU=OFF \ + -DWITH_STATIC_LIB=OFF \ + -DWITH_TENSORRT=OFF \ + -DTENSORRT_DIR=${TENSORRT_DIR} \ + -DOPENCV_DIR=${OPENCV_DIR} \ + -DCUDNN_LIB=${CUDNN_LIB_DIR} \ + -DCUDA_LIB=${CUDA_LIB_DIR} \ + + make -j + echo "################### build PaddleClas demo finished ###################" +fi diff --git a/tests/test.sh b/tests/test.sh index c717f77de3496018459655d46437eabccc142834..9c0d8236c649de0d4a216a7b791ccb8fce023491 100644 --- a/tests/test.sh +++ b/tests/test.sh @@ -1,6 +1,6 @@ #!/bin/bash FILENAME=$1 -# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer'] +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer'] MODE=$2 dataline=$(cat ${FILENAME}) @@ -145,10 +145,80 @@ benchmark_value=$(func_parser_value "${lines[49]}") infer_key1=$(func_parser_key "${lines[50]}") infer_value1=$(func_parser_value "${lines[50]}") +if [ ${MODE} = "cpp_infer" ]; then + cpp_use_gpu_key=$(func_parser_key "${lines[53]}") + cpp_use_gpu_list=$(func_parser_value "${lines[53]}") + cpp_cpu_threads_key=$(func_parser_key "${lines[54]}") + cpp_cpu_threads_list=$(func_parser_value "${lines[54]}") + cpp_use_mkldnn_key=$(func_parser_key "${lines[55]}") + cpp_use_mkldnn_list=$(func_parser_value "${lines[55]}") + cpp_use_tensorrt_key=$(func_parser_key "${lines[56]}") + cpp_use_tensorrt_list=$(func_parser_value "${lines[56]}") + cpp_use_fp16_key=$(func_parser_key "${lines[57]}") + cpp_use_fp16_list=$(func_parser_value "${lines[57]}") +fi + LOG_PATH="./tests/output" mkdir -p ${LOG_PATH} status_log="${LOG_PATH}/results.log" +function func_cpp_inference(){ + IFS='|' + _script=$1 + _log_path=$2 + _img_dir=$3 + # inference + for use_gpu in ${cpp_use_gpu_list[*]}; do + if [ ${use_gpu} = "0" ] || [ ${use_gpu} = "cpu" ]; then + for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do + if [ ${use_mkldnn} = "0" ] && [ ${_flag_quant} = "True" ]; then + continue + fi + for 
threads in ${cpp_cpu_threads_list[*]}; do + _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}.log" + set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}") + cp ../tests/config/cpp_config.txt cpp_config.txt + echo "${cpp_use_gpu_key} ${use_gpu}" >> cpp_config.txt + echo "${cpp_cpu_threads_key} ${threads}" >> cpp_config.txt + echo "${cpp_use_mkldnn_key} ${use_mkldnn}" >> cpp_config.txt + echo "${cpp_use_tensorrt_key} 0" >> cpp_config.txt + echo "${cpp_use_fp16_key} 0" >> cpp_config.txt + command="${_script} cpp_config.txt ${_img_dir} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + done + done + elif [ ${use_gpu} = "1" ] || [ ${use_gpu} = "gpu" ]; then + for use_trt in ${cpp_use_tensorrt_list[*]}; do + for precision in ${cpp_use_fp16_list[*]}; do + if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then + continue + fi + if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then + continue + fi + _save_log_path="${_log_path}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" + cp ../tests/config/cpp_config.txt cpp_config.txt + echo "${cpp_use_gpu_key} ${use_gpu}" >> cpp_config.txt + echo "${cpp_cpu_threads_key} ${threads}" >> cpp_config.txt + echo "${cpp_use_mkldnn_key} ${use_mkldnn}" >> cpp_config.txt + echo "${cpp_use_tensorrt_key} ${use_trt}" >> cpp_config.txt + echo "${cpp_use_fp16_key} ${precision}" >> cpp_config.txt + command="${_script} cpp_config.txt ${_img_dir} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + + done + done + else + echo "Does not support hardware other than CPU and GPU Currently!" 
+ fi + done +} function func_inference(){ IFS='|' @@ -247,6 +317,10 @@ if [ ${MODE} = "infer" ]; then Count=$(($Count + 1)) done cd .. +elif [ ${MODE} = "cpp_infer" ]; then + cd deploy + func_cpp_inference "./cpp/build/clas_system" "../${LOG_PATH}" "${infer_img_dir}" + cd .. else IFS="|"