diff --git a/deploy/openvino/demo/classifier.cpp b/deploy/openvino/demo/classifier.cpp index 5b5b3c387489749704901ef004608cfafe1103a8..2b5c17f08840afda69f48afd8e20018effd6a618 100755 --- a/deploy/openvino/demo/classifier.cpp +++ b/deploy/openvino/demo/classifier.cpp @@ -22,7 +22,7 @@ #include "include/paddlex/paddlex.h" DEFINE_string(model_dir, "", "Path of inference model"); -DEFINE_string(cfg_dir, "", "Path of PaddelX model yml file"); +DEFINE_string(cfg_file, "", "Path of PaddelX model yml file"); DEFINE_string(device, "CPU", "Device name"); DEFINE_string(image, "", "Path of test image file"); DEFINE_string(image_list, "", "Path of test image list file"); @@ -35,8 +35,8 @@ int main(int argc, char** argv) { std::cerr << "--model_dir need to be defined" << std::endl; return -1; } - if (FLAGS_cfg_dir == "") { - std::cerr << "--cfg_dir need to be defined" << std::endl; + if (FLAGS_cfg_file == "") { + std::cerr << "--cfg_file need to be defined" << std::endl; return -1; } if (FLAGS_image == "" & FLAGS_image_list == "") { @@ -46,7 +46,7 @@ int main(int argc, char** argv) { // 加载模型 PaddleX::Model model; - model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_device); + model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_device); // 进行预测 if (FLAGS_image_list != "") { diff --git a/deploy/openvino/demo/detector.cpp b/deploy/openvino/demo/detector.cpp index a1a4db7a42dcbadfca157d0a0c0da3a9805b420e..dd68fa8b9348840fa1fccf54d9b8aff9b9d56bbf 100644 --- a/deploy/openvino/demo/detector.cpp +++ b/deploy/openvino/demo/detector.cpp @@ -29,7 +29,7 @@ using namespace std::chrono; // NOLINT DEFINE_string(model_dir, "", "Path of openvino model xml file"); -DEFINE_string(cfg_dir, "", "Path of PaddleX model yaml file"); +DEFINE_string(cfg_file, "", "Path of PaddleX model yaml file"); DEFINE_string(image, "", "Path of test image file"); DEFINE_string(image_list, "", "Path of test image list file"); DEFINE_string(device, "CPU", "Device name"); @@ -45,8 +45,8 @@ int main(int argc, char** argv) { 
std::cerr << "--model_dir need to be defined" << std::endl; return -1; } - if (FLAGS_cfg_dir == "") { - std::cerr << "--cfg_dir need to be defined" << std::endl; + if (FLAGS_cfg_file == "") { + std::cerr << "--cfg_file need to be defined" << std::endl; return -1; } if (FLAGS_image == "" & FLAGS_image_list == "") { @@ -56,7 +56,7 @@ int main(int argc, char** argv) { // PaddleX::Model model; - model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_device); + model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_device); int imgs = 1; auto colormap = PaddleX::GenerateColorMap(model.labels.size()); diff --git a/deploy/openvino/demo/segmenter.cpp b/deploy/openvino/demo/segmenter.cpp index f612e756a0e3113441d619883ee451cc46a1b9c9..37a8538bea16ebf1a3a9014a66fe26c1f66ad78b 100644 --- a/deploy/openvino/demo/segmenter.cpp +++ b/deploy/openvino/demo/segmenter.cpp @@ -25,7 +25,7 @@ DEFINE_string(model_dir, "", "Path of openvino model xml file"); -DEFINE_string(cfg_dir, "", "Path of PaddleX model yaml file"); +DEFINE_string(cfg_file, "", "Path of PaddleX model yaml file"); DEFINE_string(image, "", "Path of test image file"); DEFINE_string(image_list, "", "Path of test image list file"); DEFINE_string(device, "CPU", "Device name"); @@ -39,8 +39,8 @@ int main(int argc, char** argv) { std::cerr << "--model_dir need to be defined" << std::endl; return -1; } - if (FLAGS_cfg_dir == "") { - std::cerr << "--cfg_dir need to be defined" << std::endl; + if (FLAGS_cfg_file == "") { + std::cerr << "--cfg_file need to be defined" << std::endl; return -1; } if (FLAGS_image == "" & FLAGS_image_list == "") { @@ -51,7 +51,7 @@ int main(int argc, char** argv) { // std::cout << "init start" << std::endl; PaddleX::Model model; - model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_device); + model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_device); std::cout << "init done" << std::endl; int imgs = 1; auto colormap = PaddleX::GenerateColorMap(model.labels.size()); diff --git 
a/deploy/openvino/include/paddlex/paddlex.h b/deploy/openvino/include/paddlex/paddlex.h index 082f178923d34c1e615f52bd391dbe0829599e44..6ac342411b07d8d4f2ab54a51333d2ac342f490b 100755 --- a/deploy/openvino/include/paddlex/paddlex.h +++ b/deploy/openvino/include/paddlex/paddlex.h @@ -39,13 +39,13 @@ namespace PaddleX { class Model { public: void Init(const std::string& model_dir, - const std::string& cfg_dir, + const std::string& cfg_file, std::string device) { - create_predictor(model_dir, cfg_dir, device); + create_predictor(model_dir, cfg_file, device); } void create_predictor(const std::string& model_dir, - const std::string& cfg_dir, + const std::string& cfg_file, std::string device); bool load_config(const std::string& model_dir); diff --git a/deploy/openvino/python/demo.py b/deploy/openvino/python/demo.py index 4406629fd428b30a84ae19f10b0541031f0e0844..93ecaab8e526977402a798c21b8b8c5696f1f70b 100644 --- a/deploy/openvino/python/demo.py +++ b/deploy/openvino/python/demo.py @@ -32,30 +32,20 @@ def arg_parser(): type=str, default='CPU', help="Specify the target device to infer on:[CPU, GPU, FPGA, HDDL, MYRIAD,HETERO]" - "Default value is CPU") + "Default value is CPU") parser.add_argument( - "--img", - "-i", - type=str, - default=None, - help="path to an image files") + "--img", "-i", type=str, default=None, help="path to an image files") parser.add_argument( - "--img_list", - "-l", - type=str, - default=None, - help="Path to a imglist") - + "--img_list", "-l", type=str, default=None, help="Path to a imglist") parser.add_argument( - "--cfg_dir", + "--cfg_file", "-c", type=str, default=None, help="Path to PaddelX model yml file") - return parser @@ -63,16 +53,16 @@ def main(): parser = arg_parser() args = parser.parse_args() model_xml = args.model_dir - model_yaml = args.cfg_dir + model_yaml = args.cfg_file #model init - if("CPU" not in args.device): - predictor = deploy.Predictor(model_xml,model_yaml,args.device) + if ("CPU" not in args.device): + predictor = 
deploy.Predictor(model_xml, model_yaml, args.device) else: - predictor = deploy.Predictor(model_xml,model_yaml) - + predictor = deploy.Predictor(model_xml, model_yaml) + #predict - if(args.img_list != None): + if (args.img_list != None): f = open(args.img_list) lines = f.readlines() for im_path in lines: @@ -83,5 +73,6 @@ def main(): im_path = args.img predictor.predict(im_path) + if __name__ == "__main__": main() diff --git a/deploy/openvino/scripts/install_third-party.sh b/deploy/openvino/scripts/install_third-party.sh index ea16f6eb0c58b6ca63f39840a93af85de7f23c7b..8824f64a37d0a0c245cfb0be7e047b5828516be1 100644 --- a/deploy/openvino/scripts/install_third-party.sh +++ b/deploy/openvino/scripts/install_third-party.sh @@ -24,7 +24,7 @@ if [ ! -d "./deps/glog" ]; then fi if [ "$ARCH" = "x86" ]; then - OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/x86opencv/opencv.tar.bz2 + OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/x86opencv/opencv.tar.bz2 else OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/armopencv/opencv.tar.bz2 fi diff --git a/deploy/openvino/src/paddlex.cpp b/deploy/openvino/src/paddlex.cpp index ee77d1b268b6d188cce9eafea68b4fe65391e776..08bc5f8b9af7ec942ea912ad0fa702fc9ec00e6f 100755 --- a/deploy/openvino/src/paddlex.cpp +++ b/deploy/openvino/src/paddlex.cpp @@ -20,7 +20,7 @@ namespace PaddleX { void Model::create_predictor(const std::string& model_dir, - const std::string& cfg_dir, + const std::string& cfg_file, std::string device) { InferenceEngine::Core ie; network_ = ie.ReadNetwork( @@ -49,11 +49,11 @@ void Model::create_predictor(const std::string& model_dir, } else { executable_network_ = ie.LoadNetwork(network_, device); } - load_config(cfg_dir); + load_config(cfg_file); } -bool Model::load_config(const std::string& cfg_dir) { - YAML::Node config = YAML::LoadFile(cfg_dir); +bool Model::load_config(const std::string& cfg_file) { + YAML::Node config = YAML::LoadFile(cfg_file); type = config["_Attributes"]["model_type"].as(); name = 
config["Model"].as(); bool to_rgb = true; diff --git a/deploy/raspberry/demo/classifier.cpp b/deploy/raspberry/demo/classifier.cpp index 03b8b7ae81230821b1260ded7784fe014d05a248..5967e1228560f0649414dc23ee1446d066c9004b 100755 --- a/deploy/raspberry/demo/classifier.cpp +++ b/deploy/raspberry/demo/classifier.cpp @@ -22,7 +22,7 @@ #include "include/paddlex/paddlex.h" DEFINE_string(model_dir, "", "Path of inference model"); -DEFINE_string(cfg_dir, "", "Path of PaddelX model yml file"); +DEFINE_string(cfg_file, "", "Path of PaddelX model yml file"); DEFINE_string(image, "", "Path of test image file"); DEFINE_string(image_list, "", "Path of test image list file"); DEFINE_int32(thread_num, 1, "num of thread to infer"); @@ -35,8 +35,8 @@ int main(int argc, char** argv) { std::cerr << "--model_dir need to be defined" << std::endl; return -1; } - if (FLAGS_cfg_dir == "") { - std::cerr << "--cfg_dir need to be defined" << std::endl; + if (FLAGS_cfg_file == "") { + std::cerr << "--cfg_file need to be defined" << std::endl; return -1; } if (FLAGS_image == "" & FLAGS_image_list == "") { @@ -46,7 +46,7 @@ int main(int argc, char** argv) { // 加载模型 PaddleX::Model model; - model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_thread_num); + model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_thread_num); std::cout << "init is done" << std::endl; // 进行预测 if (FLAGS_image_list != "") { diff --git a/deploy/raspberry/demo/detector.cpp b/deploy/raspberry/demo/detector.cpp index 7887dcb342968b6da5e197f224f0d1c2bc3b0be5..aa26509d4d5ae3ed737f8876cffd054ad3c27ee6 100755 --- a/deploy/raspberry/demo/detector.cpp +++ b/deploy/raspberry/demo/detector.cpp @@ -29,7 +29,7 @@ using namespace std::chrono; // NOLINT DEFINE_string(model_dir, "", "Path of openvino model xml file"); -DEFINE_string(cfg_dir, "", "Path of PaddleX model yaml file"); +DEFINE_string(cfg_file, "", "Path of PaddleX model yaml file"); DEFINE_string(image, "", "Path of test image file"); DEFINE_string(image_list, "", "Path of test image 
list file"); DEFINE_int32(thread_num, 1, "num of thread to infer"); @@ -45,8 +45,8 @@ int main(int argc, char** argv) { std::cerr << "--model_dir need to be defined" << std::endl; return -1; } - if (FLAGS_cfg_dir == "") { - std::cerr << "--cfg_dir need to be defined" << std::endl; + if (FLAGS_cfg_file == "") { + std::cerr << "--cfg_file need to be defined" << std::endl; return -1; } if (FLAGS_image == "" & FLAGS_image_list == "") { @@ -56,7 +56,7 @@ int main(int argc, char** argv) { // PaddleX::Model model; - model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_thread_num); + model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_thread_num); int imgs = 1; auto colormap = PaddleX::GenerateColorMap(model.labels.size()); diff --git a/deploy/raspberry/demo/segmenter.cpp b/deploy/raspberry/demo/segmenter.cpp index 2f6ede99f558db99b1c2daabfdb7161c441ef240..4e55942f01d8286ea307e947e20594779a15ee12 100755 --- a/deploy/raspberry/demo/segmenter.cpp +++ b/deploy/raspberry/demo/segmenter.cpp @@ -25,7 +25,7 @@ DEFINE_string(model_dir, "", "Path of openvino model xml file"); -DEFINE_string(cfg_dir, "", "Path of PaddleX model yaml file"); +DEFINE_string(cfg_file, "", "Path of PaddleX model yaml file"); DEFINE_string(image, "", "Path of test image file"); DEFINE_string(image_list, "", "Path of test image list file"); DEFINE_string(save_dir, "", "Path to save visualized image"); @@ -38,8 +38,8 @@ int main(int argc, char** argv) { std::cerr << "--model_dir need to be defined" << std::endl; return -1; } - if (FLAGS_cfg_dir == "") { - std::cerr << "--cfg_dir need to be defined" << std::endl; + if (FLAGS_cfg_file == "") { + std::cerr << "--cfg_file need to be defined" << std::endl; return -1; } if (FLAGS_image == "" & FLAGS_image_list == "") { @@ -50,7 +50,7 @@ int main(int argc, char** argv) { // std::cout << "init start" << std::endl; PaddleX::Model model; - model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_thread_num); + model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_thread_num); 
std::cout << "init done" << std::endl; int imgs = 1; auto colormap = PaddleX::GenerateColorMap(model.labels.size()); diff --git a/deploy/raspberry/include/paddlex/paddlex.h b/deploy/raspberry/include/paddlex/paddlex.h index 2fc5070e401a3e31d62fc548b98f853c6dee7166..7c4a7065b043140be09cd032a5465f4bb2951398 100755 --- a/deploy/raspberry/include/paddlex/paddlex.h +++ b/deploy/raspberry/include/paddlex/paddlex.h @@ -49,13 +49,13 @@ namespace PaddleX { class Model { public: void Init(const std::string& model_dir, - const std::string& cfg_dir, + const std::string& cfg_file, int thread_num) { - create_predictor(model_dir, cfg_dir, thread_num); + create_predictor(model_dir, cfg_file, thread_num); } void create_predictor(const std::string& model_dir, - const std::string& cfg_dir, + const std::string& cfg_file, int thread_num); bool load_config(const std::string& model_dir); diff --git a/deploy/raspberry/python/demo.py b/deploy/raspberry/python/demo.py index 8102760f2ce8f6bfffd3dbe051933adbcfbd2a63..512426bd380e58538e18ec71e722b1b510380b75 100644 --- a/deploy/raspberry/python/demo.py +++ b/deploy/raspberry/python/demo.py @@ -27,28 +27,18 @@ def arg_parser(): default=None, help="path to openvino model .xml file") parser.add_argument( - "--img", - "-i", - type=str, - default=None, - help="path to an image files") + "--img", "-i", type=str, default=None, help="path to an image files") parser.add_argument( - "--img_list", - "-l", - type=str, - default=None, - help="Path to a imglist") - + "--img_list", "-l", type=str, default=None, help="Path to a imglist") parser.add_argument( - "--cfg_dir", + "--cfg_file", "-c", type=str, default=None, help="Path to PaddelX model yml file") - parser.add_argument( "--thread_num", "-t", @@ -63,8 +53,6 @@ def arg_parser(): default=None, help=" image input shape of model [NCHW] like [1,3,224,244] ") - - return parser @@ -72,16 +60,16 @@ def main(): parser = arg_parser() args = parser.parse_args() model_nb = args.model_dir - model_yaml = 
args.cfg_dir + model_yaml = args.cfg_file thread_num = args.thread_num input_shape = args.input_shape - input_shape = input_shape[1:-1].split(",",3) - shape = list(map(int,input_shape)) + input_shape = input_shape[1:-1].split(",", 3) + shape = list(map(int, input_shape)) #model init - predictor = deploy.Predictor(model_nb,model_yaml,thread_num,shape) - + predictor = deploy.Predictor(model_nb, model_yaml, thread_num, shape) + #predict - if(args.img_list != None): + if (args.img_list != None): f = open(args.img_list) lines = f.readlines() for im_path in lines: @@ -92,5 +80,6 @@ def main(): im_path = args.img predictor.predict(im_path) + if __name__ == "__main__": main() diff --git a/deploy/raspberry/src/paddlex.cpp b/deploy/raspberry/src/paddlex.cpp index fd89636ed73fb9c88f8a7789a6be16be6c300768..84dce8e19652bf5d2117ee5d798872cc0bf29ab9 100755 --- a/deploy/raspberry/src/paddlex.cpp +++ b/deploy/raspberry/src/paddlex.cpp @@ -20,19 +20,19 @@ namespace PaddleX { void Model::create_predictor(const std::string& model_dir, - const std::string& cfg_dir, + const std::string& cfg_file, int thread_num) { paddle::lite_api::MobileConfig config; config.set_model_from_file(model_dir); config.set_threads(thread_num); - load_config(cfg_dir); + load_config(cfg_file); predictor_ = paddle::lite_api::CreatePaddlePredictor( config); } -bool Model::load_config(const std::string& cfg_dir) { - YAML::Node config = YAML::LoadFile(cfg_dir); +bool Model::load_config(const std::string& cfg_file) { + YAML::Node config = YAML::LoadFile(cfg_file); type = config["_Attributes"]["model_type"].as(); name = config["Model"].as(); bool to_rgb = true; diff --git a/docs/deploy/openvino/linux.md b/docs/deploy/openvino/linux.md index cad533ff0c33741b5e587a31ca6e51dc04eb3bd1..43d414a896ceaff5469e929a598bb060ee3ccd86 100644 --- a/docs/deploy/openvino/linux.md +++ b/docs/deploy/openvino/linux.md @@ -7,9 +7,9 @@ * GCC* 5.4.0 * CMake 3.0+ * PaddleX 1.0+ -* OpenVINO 2020.4 +* OpenVINO 2020.4 * 硬件平台:CPU、VPU - + 
**说明**:PaddleX安装请参考[PaddleX](https://paddlex.readthedocs.io/zh_CN/develop/install.html) , OpenVINO安装请根据相应的系统参考[OpenVINO-Linux](https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html)或者[OpenVINO-Raspbian](https://docs.openvinotoolkit.org/latest/openvino_docs_install_guides_installing_openvino_raspbian.html) 请确保系统已经安装好上述基本软件,并配置好相应环境,**下面所有示例以工作目录 `/root/projects/`演示**。 @@ -35,11 +35,11 @@ git clone https://github.com/PaddlePaddle/PaddleX.git - glog:编译请参考[编译文档](https://github.com/google/glog) -- opencv: 编译请参考 +- opencv: 编译请参考 [编译文档](https://docs.opencv.org/master/d7/d9f/tutorial_linux_install.html) - - + + ### Step3: 编译 编译`cmake`的命令在`scripts/build.sh`中,若在树莓派(Raspbian OS)上编译请修改ARCH参数x86为armv7,若自行编译第三方依赖软件请根据Step1中编译软件的实际情况修改主要参数,其主要内容说明如下: ``` @@ -71,7 +71,7 @@ ARCH=x86 | --image | 要预测的图片文件路径 | | --image_list | 按行存储图片路径的.txt文件 | | --device | 运行的平台,可选项{"CPU","MYRIAD"},默认值为"CPU",如在VPU上请使用"MYRIAD"| -| --cfg_dir | PaddleX model 的.yml配置文件 | +| --cfg_file | PaddleX model 的.yml配置文件 | | --save_dir | 可视化结果图片保存地址,仅适用于检测任务,默认值为" "既不保存可视化结果 | ### 样例 @@ -80,7 +80,7 @@ linux系统在CPU下做单张图片的分类任务预测 测试图片 `/path/to/test_img.jpeg` ```shell -./build/classifier --model_dir=/path/to/openvino_model --image=/path/to/test_img.jpeg --cfg_dir=/path/to/PadlleX_model.yml +./build/classifier --model_dir=/path/to/openvino_model --image=/path/to/test_img.jpeg --cfg_file=/path/to/PadlleX_model.yml ``` @@ -95,7 +95,7 @@ linux系统在CPU下做多张图片的检测任务预测,并保存预测可视 ``` ```shell -./build/detector --model_dir=/path/to/models/openvino_model --image_list=/root/projects/images_list.txt --cfg_dir=/path/to/PadlleX_model.yml --save_dir ./output +./build/detector --model_dir=/path/to/models/openvino_model --image_list=/root/projects/images_list.txt --cfg_file=/path/to/PadlleX_model.yml --save_dir ./output ``` `样例三`: @@ -103,7 +103,7 @@ linux系统在CPU下做多张图片的检测任务预测,并保存预测可视 测试图片 `/path/to/test_img.jpeg` ```shell -./build/classifier --model_dir=/path/to/openvino_model 
--image=/path/to/test_img.jpeg --cfg_dir=/path/to/PadlleX_model.yml --device=MYRIAD +./build/classifier --model_dir=/path/to/openvino_model --image=/path/to/test_img.jpeg --cfg_file=/path/to/PadlleX_model.yml --device=MYRIAD ``` ## 性能测试 @@ -118,7 +118,7 @@ linux系统在CPU下做多张图片的检测任务预测,并保存预测可视 |---|---|---|---| |resnet-50 | 20.56 | 16.12 | 224*224 | |mobilenet-V2 | 5.16 | 2.31 |224*224| -|yolov3-mobilnetv1 |76.63| 46.26|608*608 | +|yolov3-mobilnetv1 |76.63| 46.26|608*608 | `测试二`: 在PC机上插入VPU架构的神经计算棒(NCS2),通过Openvino加速。 @@ -130,7 +130,7 @@ linux系统在CPU下做多张图片的检测任务预测,并保存预测可视 |模型|OpenVINO|输入图片| |---|---|---| |mobilenetV2|24.00|224*224| -|resnet50_vd_ssld|58.53|224*224| +|resnet50_vd_ssld|58.53|224*224| `测试三`: 在树莓派3B上插入VPU架构的神经计算棒(NCS2),通过Openvino加速。 diff --git a/docs/deploy/openvino/python.md b/docs/deploy/openvino/python.md index 0c7b9491f315de93e75ebc617131e5c80327f1a3..e750d11e932f1c40aad59fc1463d47c08b63fa5f 100644 --- a/docs/deploy/openvino/python.md +++ b/docs/deploy/openvino/python.md @@ -19,8 +19,8 @@ | --img | 要预测的图片文件路径 | | --image_list | 按行存储图片路径的.txt文件 | | --device | 运行的平台, 默认值为"CPU" | -| --cfg_dir | PaddleX model 的.yml配置文件 | - +| --cfg_file | PaddleX model 的.yml配置文件 | + ### 样例 `样例一`: 测试图片 `/path/to/test_img.jpeg` @@ -28,7 +28,7 @@ ``` cd /root/projects/python -python demo.py --model_dir /path/to/openvino_model --img /path/to/test_img.jpeg --cfg_dir /path/to/PadlleX_model.yml +python demo.py --model_dir /path/to/openvino_model --img /path/to/test_img.jpeg --cfg_file /path/to/PadlleX_model.yml ``` 样例二`: @@ -45,7 +45,5 @@ python demo.py --model_dir /path/to/openvino_model --img /path/to/test_img.jpeg ``` cd /root/projects/python -python demo.py --model_dir /path/to/models/openvino_model --image_list /root/projects/images_list.txt --cfg_dir=/path/to/PadlleX_model.yml +python demo.py --model_dir /path/to/models/openvino_model --image_list /root/projects/images_list.txt --cfg_file=/path/to/PadlleX_model.yml ``` - - diff --git a/docs/deploy/openvino/windows.md 
b/docs/deploy/openvino/windows.md index 35b84abe65650921c6d75d532637396075f77be1..a65312561f9628e2d99e16241e79b0a8acd0e83e 100644 --- a/docs/deploy/openvino/windows.md +++ b/docs/deploy/openvino/windows.md @@ -81,7 +81,7 @@ cd D:\projects\PaddleX\deploy\openvino\out\build\x64-Release | --image | 要预测的图片文件路径 | | --image_list | 按行存储图片路径的.txt文件 | | --device | 运行的平台,可选项{"CPU","MYRIAD"},默认值为"CPU",如在VPU上请使用"MYRIAD"| -| --cfg_dir | PaddleX model 的.yml配置文件 | +| --cfg_file | PaddleX model 的.yml配置文件 | | --save_dir | 可视化结果图片保存地址,仅适用于检测任务,默认值为" "既不保存可视化结果 | ### 样例 @@ -90,7 +90,7 @@ cd D:\projects\PaddleX\deploy\openvino\out\build\x64-Release 测试图片 `/path/to/test_img.jpeg` ```shell -./classifier.exe --model_dir=/path/to/openvino_model --image=/path/to/test_img.jpeg --cfg_dir=/path/to/PadlleX_model.yml +./classifier.exe --model_dir=/path/to/openvino_model --image=/path/to/test_img.jpeg --cfg_file=/path/to/PadlleX_model.yml ``` `样例二`: @@ -104,7 +104,7 @@ cd D:\projects\PaddleX\deploy\openvino\out\build\x64-Release ``` ```shell -./detector.exe --model_dir=/path/to/models/openvino_model --image_list=/root/projects/images_list.txt --cfg_dir=/path/to/PadlleX_model.yml --save_dir ./output +./detector.exe --model_dir=/path/to/models/openvino_model --image_list=/root/projects/images_list.txt --cfg_file=/path/to/PadlleX_model.yml --save_dir ./output ``` `样例三`: @@ -112,5 +112,5 @@ cd D:\projects\PaddleX\deploy\openvino\out\build\x64-Release 测试图片 `/path/to/test_img.jpeg` ```shell -.classifier.exe --model_dir=/path/to/openvino_model --image=/path/to/test_img.jpeg --cfg_dir=/path/to/PadlleX_model.yml --device=MYRIAD -``` \ No newline at end of file +.classifier.exe --model_dir=/path/to/openvino_model --image=/path/to/test_img.jpeg --cfg_file=/path/to/PadlleX_model.yml --device=MYRIAD +``` diff --git a/docs/deploy/raspberry/Raspberry.md b/docs/deploy/raspberry/Raspberry.md index fb23cb0c4c16224ebcaa06deff6732ff64cdef0a..a49b9605e9956a288162e4d6fc6bd969f0103d90 100644 --- 
a/docs/deploy/raspberry/Raspberry.md +++ b/docs/deploy/raspberry/Raspberry.md @@ -4,7 +4,7 @@ PaddleX支持通过Paddle-Lite和基于OpenVINO的神经计算棒(NCS2)这两种 ## 硬件环境配置 -对于尚未安装系统的树莓派首先需要进行系统安装、环境配置等步骤来初始化硬件环境,过程中需要的软硬件如下: +对于尚未安装系统的树莓派首先需要进行系统安装、环境配置等步骤来初始化硬件环境,过程中需要的软硬件如下: - 硬件:micro SD,显示器,键盘,鼠标 - 软件:Raspbian OS @@ -23,7 +23,7 @@ sudo apt-get upgrade ``` ## Paddle-Lite部署 -基于Paddle-Lite的部署目前可以支持PaddleX的分类、分割与检测模型,其实检测模型仅支持YOLOV3 +基于Paddle-Lite的部署目前可以支持PaddleX的分类、分割与检测模型,其实检测模型仅支持YOLOV3 部署的流程包括:PaddleX模型转换与转换后的模型部署 **说明**:PaddleX安装请参考[PaddleX](https://paddlex.readthedocs.io/zh_CN/develop/install.html),Paddle-Lite详细资料请参考[Paddle-Lite](https://paddle-lite.readthedocs.io/zh/latest/index.html) @@ -62,11 +62,11 @@ sudo ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv7hf --arm_lang=gcc - glog:编译请参考[编译文档](https://github.com/google/glog) -- opencv: 编译请参考 +- opencv: 编译请参考 [编译文档](https://docs.opencv.org/master/d7/d9f/tutorial_linux_install.html) ### Step4: 编译 -编译`cmake`的命令在`scripts/build.sh`中,修改LITE_DIR为Paddle-Lite预测库目录,若自行编译第三方依赖软件请根据Step1中编译软件的实际情况修改主要参数,其主要内容说明如下: -``` +编译`cmake`的命令在`scripts/build.sh`中,修改LITE_DIR为Paddle-Lite预测库目录,若自行编译第三方依赖软件请根据Step1中编译软件的实际情况修改主要参数,其主要内容说明如下: +``` # Paddle-Lite预编译库的路径 LITE_DIR=/path/to/Paddle-Lite/inference/lib # gflags预编译库的路径 @@ -91,7 +91,7 @@ OPENCV_DIR=$(pwd)/deps/opencv/ | --image | 要预测的图片文件路径 | | --image_list | 按行存储图片路径的.txt文件 | | --thread_num | 预测的线程数,默认值为1 | -| --cfg_dir | PaddleX model 的.yml配置文件 | +| --cfg_file | PaddleX model 的.yml配置文件 | | --save_dir | 可视化结果图片保存地址,仅适用于检测和分割任务,默认值为" "既不保存可视化结果 | ### 样例 @@ -100,8 +100,8 @@ OPENCV_DIR=$(pwd)/deps/opencv/ 测试图片 `/path/to/test_img.jpeg` ```shell -./build/classifier --model_dir=/path/to/nb_model ---image=/path/to/test_img.jpeg --cfg_dir=/path/to/PadlleX_model.yml --thread_num=4 +./build/classifier --model_dir=/path/to/nb_model +--image=/path/to/test_img.jpeg --cfg_file=/path/to/PadlleX_model.yml --thread_num=4 ``` @@ -116,7 +116,7 @@ OPENCV_DIR=$(pwd)/deps/opencv/ ``` ```shell -./build/segmenter 
--model_dir=/path/to/models/nb_model --image_list=/root/projects/images_list.txt --cfg_dir=/path/to/PadlleX_model.yml --save_dir ./output --thread_num=4 +./build/segmenter --model_dir=/path/to/models/nb_model --image_list=/root/projects/images_list.txt --cfg_file=/path/to/PadlleX_model.yml --save_dir ./output --thread_num=4 ``` ## 性能测试 @@ -153,4 +153,4 @@ OPENCV_DIR=$(pwd)/deps/opencv/ ## NCS2部署 树莓派支持通过OpenVINO在NCS2上跑PaddleX模型预测,目前仅支持PaddleX的分类网络,基于NCS2的方式包含Paddle模型转OpenVINO IR以及部署IR在NCS2上进行预测两个步骤。 - 模型转换请参考:[PaddleX模型转换为OpenVINO IR]('./openvino/export_openvino_model.md'),raspbian OS上的OpenVINO不支持模型转换,需要先在host侧转换FP16的IR。 -- 预测部署请参考[OpenVINO部署](./openvino/linux.md)中VPU在raspbian OS部署的部分 \ No newline at end of file +- 预测部署请参考[OpenVINO部署](./openvino/linux.md)中VPU在raspbian OS部署的部分 diff --git a/docs/deploy/raspberry/python.md b/docs/deploy/raspberry/python.md index aa46aa395f8e80f79d294ce79e013a2135df775f..086b34dcd06709ecc48cce973755c4ada8a84357 100644 --- a/docs/deploy/raspberry/python.md +++ b/docs/deploy/raspberry/python.md @@ -22,10 +22,10 @@ python -m pip install paddlelite | --model_dir | 模型转换生成的.xml文件路径,请保证模型转换生成的三个文件在同一路径下| | --img | 要预测的图片文件路径 | | --image_list | 按行存储图片路径的.txt文件 | -| --cfg_dir | PaddleX model 的.yml配置文件 | +| --cfg_file | PaddleX model 的.yml配置文件 | | --thread_num | 预测的线程数, 默认值为1 | | --input_shape | 模型输入中图片输入的大小[N,C,H.W] | - + ### 样例 `样例一`: 测试图片 `/path/to/test_img.jpeg` @@ -33,7 +33,7 @@ python -m pip install paddlelite ``` cd /root/projects/python -python demo.py --model_dir /path/to/openvino_model --img /path/to/test_img.jpeg --cfg_dir /path/to/PadlleX_model.yml --thread_num 4 --input_shape [1,3,224,224] +python demo.py --model_dir /path/to/openvino_model --img /path/to/test_img.jpeg --cfg_file /path/to/PadlleX_model.yml --thread_num 4 --input_shape [1,3,224,224] ``` 样例二`: @@ -50,5 +50,5 @@ python demo.py --model_dir /path/to/openvino_model --img /path/to/test_img.jpeg ``` cd /root/projects/python -python demo.py --model_dir 
/path/to/models/openvino_model --image_list /root/projects/images_list.txt --cfg_dir=/path/to/PadlleX_model.yml --thread_num 4 --input_shape [1,3,224,224] -``` \ No newline at end of file +python demo.py --model_dir /path/to/models/openvino_model --image_list /root/projects/images_list.txt --cfg_file=/path/to/PadlleX_model.yml --thread_num 4 --input_shape [1,3,224,224] +```