diff --git a/.github/ISSUE_TEMPLATE/1_data.md b/.github/ISSUE_TEMPLATE/1_data.md new file mode 100644 index 0000000000000000000000000000000000000000..05627aa353d1cf06074445d2bb5344d94727fedf --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1_data.md @@ -0,0 +1,6 @@ +--- +name: 1. 数据类问题 +about: 数据标注、格式转换等问题 +--- + +说明数据类型(图像分类、目标检测、实例分割或语义分割) diff --git a/.github/ISSUE_TEMPLATE/2_train.md b/.github/ISSUE_TEMPLATE/2_train.md new file mode 100644 index 0000000000000000000000000000000000000000..489159731bfef42773dffa15cd30582d5c53f992 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2_train.md @@ -0,0 +1,6 @@ +--- +name: 2. 模型训练 +about: 模型训练中的问题 +--- + +如模型训练出错,建议贴上模型训练代码,以便开发人员分析,并快速响应 diff --git a/.github/ISSUE_TEMPLATE/3_deploy.md b/.github/ISSUE_TEMPLATE/3_deploy.md new file mode 100644 index 0000000000000000000000000000000000000000..d012d10125c957e702f3877dc087b7331baceb0a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/3_deploy.md @@ -0,0 +1,6 @@ +--- +name: 3. 模型部署 +about: 模型部署相关问题,包括C++、Python、Paddle Lite等 +--- + +说明您的部署环境,部署需求,模型类型和应用场景等,便于开发人员快速响应。 diff --git a/.github/ISSUE_TEMPLATE/4_gui.md b/.github/ISSUE_TEMPLATE/4_gui.md new file mode 100644 index 0000000000000000000000000000000000000000..053f70da760ce35d6b4f53a00c81770b3ff48bf1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/4_gui.md @@ -0,0 +1,6 @@ +--- +name: 4. PaddleX GUI使用问题 +about: Paddle GUI客户端使用问题 +--- + +PaddleX GUI: https://www.paddlepaddle.org.cn/paddle/paddleX diff --git a/.github/ISSUE_TEMPLATE/5_other.md b/.github/ISSUE_TEMPLATE/5_other.md new file mode 100644 index 0000000000000000000000000000000000000000..8ddfe49b544621918355f5c114c1124bdecc8ef3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/5_other.md @@ -0,0 +1,4 @@ +--- +name: 5. 
其它类型问题 +about: 所有问题都可以在这里提 +--- diff --git a/deploy/cpp/CMakeLists.txt b/deploy/cpp/CMakeLists.txt index 2a9680563b991d1d9fb9d26aa5167c8718f68c0e..349afa2cae5bf40721cafdf38bbf28ddd621beeb 100644 --- a/deploy/cpp/CMakeLists.txt +++ b/deploy/cpp/CMakeLists.txt @@ -50,7 +50,9 @@ endmacro() if (WITH_ENCRYPTION) -add_definitions( -DWITH_ENCRYPTION=${WITH_ENCRYPTION}) + if (NOT (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")) + add_definitions( -DWITH_ENCRYPTION=${WITH_ENCRYPTION}) + endif() endif() if (WITH_MKL) @@ -268,9 +270,11 @@ endif() if(WITH_ENCRYPTION) if(NOT WIN32) + if (NOT (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")) include_directories("${ENCRYPTION_DIR}/include") link_directories("${ENCRYPTION_DIR}/lib") set(DEPS ${DEPS} ${ENCRYPTION_DIR}/lib/libpmodel-decrypt${CMAKE_SHARED_LIBRARY_SUFFIX}) + endif() else() include_directories("${ENCRYPTION_DIR}/include") link_directories("${ENCRYPTION_DIR}/lib") @@ -335,7 +339,6 @@ if (WIN32 AND WITH_MKL) COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll ) add_custom_command(TARGET video_classifier POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll @@ -357,7 +360,6 @@ if (WIN32 AND WITH_MKL) COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll COMMAND 
${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll ) # for encryption if (EXISTS "${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll") diff --git a/deploy/cpp/include/paddlex/paddlex.h b/deploy/cpp/include/paddlex/paddlex.h index e0d0569341198d0a0b2a8c6d0637c3f5a61e1f3f..00b1a05ac8127d403dd7325f3357ece75ec23a58 100644 --- a/deploy/cpp/include/paddlex/paddlex.h +++ b/deploy/cpp/include/paddlex/paddlex.h @@ -175,7 +175,7 @@ class Model { * @return true if predict successfully * */ bool predict(const std::vector &im_batch, - std::vector *result, + std::vector *results, int thread_num = 1); /* @@ -201,7 +201,7 @@ class Model { * @return true if predict successfully * */ bool predict(const std::vector &im_batch, - std::vector *result, + std::vector *results, int thread_num = 1); // model type, include 3 type: classifier, detector, segmenter diff --git a/deploy/cpp/include/paddlex/visualize.h b/deploy/cpp/include/paddlex/visualize.h index 9b80ca367bc8e45334c951cb6dd32069c67c9dbd..c64fa0addcca451db56766db56fe237a8ed35dc0 100644 --- a/deploy/cpp/include/paddlex/visualize.h +++ b/deploy/cpp/include/paddlex/visualize.h @@ -23,7 +23,7 @@ #else // Linux/Unix #include // #include -#ifdef __arm__ // for arm +#if defined(__arm__) || defined(__aarch64__) // for arm #include #include #else diff --git a/deploy/cpp/scripts/jetson_bootstrap.sh b/deploy/cpp/scripts/jetson_bootstrap.sh deleted file mode 100644 index ebd95d0f20439674bbae2628ab7f8d89b7b4beca..0000000000000000000000000000000000000000 --- a/deploy/cpp/scripts/jetson_bootstrap.sh +++ /dev/null @@ -1,10 +0,0 @@ -# download pre-compiled opencv lib -OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/tools/opencv3_aarch.tgz -if [ ! 
-d "./deps/opencv3" ]; then - mkdir -p deps - cd deps - wget -c ${OPENCV_URL} - tar xvfz opencv3_aarch.tgz - rm -rf opencv3_aarch.tgz - cd .. -fi diff --git a/deploy/cpp/scripts/jetson_build.sh b/deploy/cpp/scripts/jetson_build.sh index 95bec3cac95be5cf686d63ec5b0f49f62e706586..bb2957e351900872189773eeaa41a75d36ec3471 100644 --- a/deploy/cpp/scripts/jetson_build.sh +++ b/deploy/cpp/scripts/jetson_build.sh @@ -14,14 +14,7 @@ WITH_STATIC_LIB=OFF # CUDA 的 lib 路径 CUDA_LIB=/usr/local/cuda/lib64 # CUDNN 的 lib 路径 -CUDNN_LIB=/usr/local/cuda/lib64 - -# 是否加载加密后的模型 -WITH_ENCRYPTION=OFF - -# OPENCV 路径, 如果使用自带预编译版本可不修改 -sh $(pwd)/scripts/jetson_bootstrap.sh # 下载预编译版本的opencv -OPENCV_DIR=$(pwd)/deps/opencv3 +CUDNN_LIB=/usr/lib/aarch64-linux-gnu # 以下无需改动 rm -rf build @@ -31,12 +24,9 @@ cmake .. \ -DWITH_GPU=${WITH_GPU} \ -DWITH_MKL=${WITH_MKL} \ -DWITH_TENSORRT=${WITH_TENSORRT} \ - -DWITH_ENCRYPTION=${WITH_ENCRYPTION} \ -DTENSORRT_DIR=${TENSORRT_DIR} \ -DPADDLE_DIR=${PADDLE_DIR} \ -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \ -DCUDA_LIB=${CUDA_LIB} \ - -DCUDNN_LIB=${CUDNN_LIB} \ - -DENCRYPTION_DIR=${ENCRYPTION_DIR} \ - -DOPENCV_DIR=${OPENCV_DIR} + -DCUDNN_LIB=${CUDNN_LIB} make diff --git a/deploy/cpp/src/paddlex.cpp b/deploy/cpp/src/paddlex.cpp index cf1dfc955c43f9a61539e93a34c77c6ab4b198a9..1bd30863e894910581384296edd2f656b79ffe21 100644 --- a/deploy/cpp/src/paddlex.cpp +++ b/deploy/cpp/src/paddlex.cpp @@ -225,6 +225,8 @@ bool Model::predict(const std::vector& im_batch, outputs_.resize(size); output_tensor->copy_to_cpu(outputs_.data()); // 对模型输出结果进行后处理 + (*results).clear(); + (*results).resize(batch_size); int single_batch_size = size / batch_size; for (int i = 0; i < batch_size; ++i) { auto start_ptr = std::begin(outputs_); @@ -343,7 +345,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) { } bool Model::predict(const std::vector& im_batch, - std::vector* result, + std::vector* results, int thread_num) { for (auto& inputs : inputs_batch_) { inputs.clear(); @@ -467,6 +469,8 
@@ bool Model::predict(const std::vector& im_batch, auto lod_vector = output_box_tensor->lod(); int num_boxes = size / 6; // 解析预测框box + (*results).clear(); + (*results).resize(batch_size); for (int i = 0; i < lod_vector[0].size() - 1; ++i) { for (int j = lod_vector[0][i]; j < lod_vector[0][i + 1]; ++j) { Box box; @@ -480,7 +484,7 @@ bool Model::predict(const std::vector& im_batch, float w = xmax - xmin + 1; float h = ymax - ymin + 1; box.coordinate = {xmin, ymin, w, h}; - (*result)[i].boxes.push_back(std::move(box)); + (*results)[i].boxes.push_back(std::move(box)); } } @@ -499,9 +503,9 @@ bool Model::predict(const std::vector& im_batch, output_mask_tensor->copy_to_cpu(output_mask.data()); int mask_idx = 0; for (int i = 0; i < lod_vector[0].size() - 1; ++i) { - (*result)[i].mask_resolution = output_mask_shape[2]; - for (int j = 0; j < (*result)[i].boxes.size(); ++j) { - Box* box = &(*result)[i].boxes[j]; + (*results)[i].mask_resolution = output_mask_shape[2]; + for (int j = 0; j < (*results)[i].boxes.size(); ++j) { + Box* box = &(*results)[i].boxes[j]; int category_id = box->category_id; auto begin_mask = output_mask.begin() + (mask_idx * classes + category_id) * mask_pixels; @@ -624,7 +628,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) { } bool Model::predict(const std::vector& im_batch, - std::vector* result, + std::vector* results, int thread_num) { for (auto& inputs : inputs_batch_) { inputs.clear(); @@ -647,8 +651,8 @@ bool Model::predict(const std::vector& im_batch, } int batch_size = im_batch.size(); - (*result).clear(); - (*result).resize(batch_size); + (*results).clear(); + (*results).resize(batch_size); int h = inputs_batch_[0].new_im_size_[0]; int w = inputs_batch_[0].new_im_size_[1]; auto im_tensor = predictor_->GetInputTensor("image"); @@ -680,14 +684,14 @@ bool Model::predict(const std::vector& im_batch, int single_batch_size = size / batch_size; for (int i = 0; i < batch_size; ++i) { - 
(*result)[i].label_map.data.resize(single_batch_size); - (*result)[i].label_map.shape.push_back(1); + (*results)[i].label_map.data.resize(single_batch_size); + (*results)[i].label_map.shape.push_back(1); for (int j = 1; j < output_label_shape.size(); ++j) { - (*result)[i].label_map.shape.push_back(output_label_shape[j]); + (*results)[i].label_map.shape.push_back(output_label_shape[j]); } std::copy(output_labels_iter + i * single_batch_size, output_labels_iter + (i + 1) * single_batch_size, - (*result)[i].label_map.data.data()); + (*results)[i].label_map.data.data()); } // 获取预测置信度scoremap @@ -704,29 +708,29 @@ bool Model::predict(const std::vector& im_batch, int single_batch_score_size = size / batch_size; for (int i = 0; i < batch_size; ++i) { - (*result)[i].score_map.data.resize(single_batch_score_size); - (*result)[i].score_map.shape.push_back(1); + (*results)[i].score_map.data.resize(single_batch_score_size); + (*results)[i].score_map.shape.push_back(1); for (int j = 1; j < output_score_shape.size(); ++j) { - (*result)[i].score_map.shape.push_back(output_score_shape[j]); + (*results)[i].score_map.shape.push_back(output_score_shape[j]); } std::copy(output_scores_iter + i * single_batch_score_size, output_scores_iter + (i + 1) * single_batch_score_size, - (*result)[i].score_map.data.data()); + (*results)[i].score_map.data.data()); } // 解析输出结果到原图大小 for (int i = 0; i < batch_size; ++i) { - std::vector label_map((*result)[i].label_map.data.begin(), - (*result)[i].label_map.data.end()); - cv::Mat mask_label((*result)[i].label_map.shape[1], - (*result)[i].label_map.shape[2], + std::vector label_map((*results)[i].label_map.data.begin(), + (*results)[i].label_map.data.end()); + cv::Mat mask_label((*results)[i].label_map.shape[1], + (*results)[i].label_map.shape[2], CV_8UC1, label_map.data()); - cv::Mat mask_score((*result)[i].score_map.shape[2], - (*result)[i].score_map.shape[3], + cv::Mat mask_score((*results)[i].score_map.shape[2], + (*results)[i].score_map.shape[3], 
CV_32FC1, - (*result)[i].score_map.data.data()); + (*results)[i].score_map.data.data()); int idx = 1; int len_postprocess = inputs_batch_[i].im_size_before_resize_.size(); for (std::vector::reverse_iterator iter = @@ -762,12 +766,12 @@ bool Model::predict(const std::vector& im_batch, } ++idx; } - (*result)[i].label_map.data.assign(mask_label.begin(), + (*results)[i].label_map.data.assign(mask_label.begin(), mask_label.end()); - (*result)[i].label_map.shape = {mask_label.rows, mask_label.cols}; - (*result)[i].score_map.data.assign(mask_score.begin(), + (*results)[i].label_map.shape = {mask_label.rows, mask_label.cols}; + (*results)[i].score_map.data.assign(mask_score.begin(), mask_score.end()); - (*result)[i].score_map.shape = {mask_score.rows, mask_score.cols}; + (*results)[i].score_map.shape = {mask_score.rows, mask_score.cols}; } return true; } diff --git a/docs/gui/download.md b/docs/gui/download.md index 77bb9962b37498ec3279a51cdc1faa34da1f498b..0cccdb4e95330b29f60a61862bd9e6ba7d5b88c9 100644 --- a/docs/gui/download.md +++ b/docs/gui/download.md @@ -1,10 +1,10 @@ -## PaddleX GUI安装 +## PaddleX GUI安装 PaddleX GUI是提升项目开发效率的核心模块,开发者可快速完成深度学习模型全流程开发。我们诚挚地邀请您前往 [官网](https://www.paddlepaddle.org.cn/paddle/paddleX)下载试用PaddleX GUI可视化前端,并获得您宝贵的意见或开源项目贡献。 -#### 安装推荐环境 +### 安装推荐环境 * **操作系统**: * Windows7/8/10(推荐Windows 10); diff --git a/docs/gui/index.rst b/docs/gui/index.rst index f823b428bf09babffc8da3c4881d531f047a6099..ad1f6ab44bb9902455d8e190f017f42f3ab290a0 100755 --- a/docs/gui/index.rst +++ b/docs/gui/index.rst @@ -3,40 +3,48 @@ PaddleX GUI PaddleX GUI是基于PaddleX实现的可视化开发客户端。开发者以点选、键入的方式快速体验深度学习模型开发的全流程。不仅可以作为您提升深度学习模型开发效率的工具,更可以作为您们应用PaddleX API搭建专属的行业软件/应用的示例参照。 -PaddleX GUI 当前提供Windows,Mac,Ubuntu三种版本一键绿色安装的方式。请至[飞桨官网](https://www.paddlepaddle.org.cn/)下载您需要的版本。 +PaddleX GUI 当前提供Windows,Mac,Ubuntu三种版本一键绿色安装的方式。请至飞桨官网:https://www.paddlepaddle.org.cn/paddle/paddleX 下载您需要的版本。 功能简介 --------------------------------------- PaddleX GUI是PaddleX 
API的衍生品,它在集成API功能的基础上,额外提供了可视化分析、评估等附加功能,致力于为开发者带来极致顺畅的开发体验。其拥有以下独特的功能: -- **全流程打通** -PaddleX GUI覆盖深度学习模型开发必经的**数据处理**、**超参配置**、**模型训练及优化**、**模型发布**全流程,无需开发一行代码,即可得到高性深度学习推理模型。 +全流程打通 +>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> +PaddleX GUI覆盖深度学习模型开发必经的 **数据处理** 、 **超参配置** 、 **模型训练及优化** 、 **模型发布** 全流程,无需开发一行代码,即可得到高性深度学习推理模型。 -- **数据集智能分析** -详细的数据结构说明,并提供**数据标签自动校验**。支持**可视化数据预览**、**数据分布图表展示**、**一键数据集切分**等实用功能 +数据集智能分析 +>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> +详细的数据结构说明,并提供 **数据标签自动校验** 。支持 **可视化数据预览** 、 **数据分布图表展示** 、 **一键数据集切分** 等实用功能 -- **自动超参推荐** -集成飞桨团队长时间产业实践经验,根据用户选择的模型类别、骨架网络等,提供多种针对性优化的**预训练模型**,并**提供推荐超参配置**,可**一键开启多种优化策略** +自动超参推荐 +>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> +集成飞桨团队长时间产业实践经验,根据用户选择的模型类别、骨架网络等,提供多种针对性优化的 **预训练模型** ,并 **提供推荐超参配置** ,可 **一键开启多种优化策略** -- **可视化模型评估** -集成**可视化分析工具:VisualDL**, 以线性图表的形式展示acc、lr等关键参数在训练过程中的变化趋势。提供**混淆矩阵**等实用方法,帮助快速定位问题,加速调参。模型评估报告一键导出,方便项目复盘分析。 +可视化模型评估 +>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> +集成 **可视化分析工具:VisualDL** , 以线性图表的形式展示acc、lr等关键参数在训练过程中的变化趋势。提供 **混淆矩阵** 等实用方法,帮助快速定位问题,加速调参。模型评估报告一键导出,方便项目复盘分析。 -- **模型裁剪及量化** +模型裁剪及量化 +>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 一键启动模型裁剪、量化,在不同阶段为开发者提供模型优化的策略,满足不同环境对模型性能的需求。 -- **预训练模型管理** +预训练模型管理 +>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 可对历史训练模型进行保存及管理,未进行裁剪的模型可以保存为预训练模型,在后续任务中使用。 -- **可视化模型测试** +可视化模型测试 +>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 客户端直接展示模型预测效果,无需上线即可进行效果评估 -- **模型多端部署** +模型多端部署 +>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 点选式选择模型发布平台、格式,一键导出预测模型,并匹配完善的模型预测部署说明文档,贴心助力产业端到端项目落地 .. 
toctree:: :maxdepth: 2 - :caption: 文档目录: + :caption: 文档目录 download.md how_to_use.md diff --git a/docs/train/semantic_segmentation.md b/docs/train/semantic_segmentation.md index 391df0aca7b3103dc89068cc7a2603bcc86226b0..fe7d9293f127c9f1fd80aafbdf9431ecbbc6a1af 100644 --- a/docs/train/semantic_segmentation.md +++ b/docs/train/semantic_segmentation.md @@ -12,7 +12,7 @@ PaddleX目前提供了DeepLabv3p、UNet、HRNet和FastSCNN四种语义分割结 | :---------------- | :------- | :------- | :--------- | :--------- | :----- | | [DeepLabv3p-MobileNetV2-x0.25](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2_x0.25.py) | - | 2.9MB | - | - | 模型小,预测速度快,适用于低性能或移动端设备 | | [DeepLabv3p-MobileNetV2-x1.0](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2.py) | 69.8% | 11MB | - | - | 模型小,预测速度快,适用于低性能或移动端设备 | -| [DeepLabv3p-Xception65](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/semantic_segmentation/deeplabv3p_xception65.pyy) | 79.3% | 158MB | - | - | 模型大,精度高,适用于服务端 | +| [DeepLabv3p-Xception65](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/semantic_segmentation/deeplabv3p_xception65.py) | 79.3% | 158MB | - | - | 模型大,精度高,适用于服务端 | | [UNet](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/semantic_segmentation/unet.py) | - | 52MB | - | - | 模型较大,精度高,适用于服务端 | | [HRNet](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/semantic_segmentation/hrnet.py) | 79.4% | 37MB | - | - | 模型较小,模型精度高,适用于服务端部署 | | [FastSCNN](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/semantic_segmentation/fast_scnn.py) | - | 4.5MB | - | - | 模型小,预测速度快,适用于低性能或移动端设备 | diff --git a/tutorials/train/image_classification/README.md b/tutorials/train/image_classification/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9d09c6e274e5315d650de7010041414d02da8740 --- /dev/null +++ 
b/tutorials/train/image_classification/README.md @@ -0,0 +1,20 @@ +# 图像分类训练示例 + +本目录下为图像分类示例代码,用户在安装完PaddlePaddle和PaddleX即可直接进行训练。 + +- [PaddlePaddle安装](https://www.paddlepaddle.org.cn/install/quick) +- [PaddleX安装](https://paddlex.readthedocs.io/zh_CN/develop/install.html) + +## 模型训练 +如下所示,直接下载代码后运行即可,代码会自动下载训练数据 +``` +python mobilenetv3_small_ssld.py +``` + +## VisualDL可视化训练指标 +在模型训练过程,在`train`函数中,将`use_vdl`设为True,则训练过程会自动将训练日志以VisualDL的格式打点在`save_dir`(用户自己指定的路径)下的`vdl_log`目录,用户可以使用如下命令启动VisualDL服务,查看可视化指标 +``` +visualdl --logdir output/mobilenetv3_small_ssld/vdl_log --port 8001 +``` + +服务启动后,使用浏览器打开 http://0.0.0.0:8001 或 http://localhost:8001