From 08d61b700e87d017c8f36f4bb7d4c44e698647f9 Mon Sep 17 00:00:00 2001 From: jiangjiajun Date: Mon, 27 Apr 2020 20:24:19 +0800 Subject: [PATCH] modify docs --- README.md | 2 + docs/apis/datasets.md | 8 +-- docs/apis/slim.md | 2 +- docs/apis/transforms/cls_transforms.md | 2 +- docs/apis/transforms/det_transforms.md | 2 +- docs/apis/transforms/seg_transforms.md | 2 +- docs/conf.py | 63 +++++++++++-------- docs/how_to_offline_run.md | 39 ++++++++++++ docs/quick_start.md | 3 +- docs/tutorials/compress/classification.md | 14 ++--- docs/tutorials/compress/detection.md | 6 +- docs/tutorials/compress/segmentation.md | 6 +- docs/tutorials/train/classification.md | 2 +- docs/tutorials/train/detection.md | 2 +- docs/tutorials/train/instance_segmentation.md | 2 +- docs/tutorials/train/segmentation.md | 4 +- paddlex/cv/models/slim/prune.py | 2 +- paddlex/utils/logging.py | 2 +- tutorials/compress/README.md | 6 +- .../{mobilenet.py => mobilenetv2.py} | 4 +- 20 files changed, 113 insertions(+), 60 deletions(-) create mode 100644 docs/how_to_offline_run.md rename tutorials/compress/classification/{mobilenet.py => mobilenetv2.py} (97%) diff --git a/README.md b/README.md index 5c36ec1..35c22d0 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ PaddleX +[![Build Status][image-1]][1] +[![License][image-2]]() PaddleX是基于飞桨技术生态的全流程深度学习模型开发工具。具备易集成,易使用,全流程等特点。PaddleX作为深度学习开发工具,不仅提供了开源的内核代码,可供用户灵活使用或集成,同时也提供了配套的前端可视化客户端套件,让用户以可视化地方式进行模型开发,访问[PaddleX官网](https://www.paddlepaddle.org.cn/paddlex/download)获取更多相关细节。 ## 安装 diff --git a/docs/apis/datasets.md b/docs/apis/datasets.md index 34569a9..2855289 100644 --- a/docs/apis/datasets.md +++ b/docs/apis/datasets.md @@ -6,7 +6,7 @@ paddlex.datasets.ImageNet(data_dir, file_list, label_list, transforms=None, num_ ``` 读取ImageNet格式的分类数据集,并对样本进行相应的处理。ImageNet数据集格式的介绍可查看文档:[数据集格式说明](../datasets.md) -示例:[代码文件](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/classification/mobilenetv2.py#L25) +示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/classification/mobilenetv2.py#L25) ### 参数 @@ -27,7 +27,7 @@ paddlex.datasets.VOCDetection(data_dir, file_list, label_list, transforms=None, 读取PascalVOC格式的检测数据集,并对样本进行相应的处理。PascalVOC数据集格式的介绍可查看文档:[数据集格式说明](../datasets.md) -示例:[代码文件](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/detection/yolov3_mobilenetv1.py#L29) +示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/detection/yolov3_mobilenetv1.py#L29) ### 参数 @@ -48,7 +48,7 @@ paddlex.datasets.COCODetection(data_dir, ann_file, transforms=None, num_workers= 读取MSCOCO格式的检测数据集,并对样本进行相应的处理,该格式的数据集同样可以应用到实例分割模型的训练中。MSCOCO数据集格式的介绍可查看文档:[数据集格式说明](../datasets.md) -示例:[代码文件](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/detection/mask_rcnn_r50_fpn.py#L27) +示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/detection/mask_rcnn_r50_fpn.py#L27) ### 参数 @@ -68,7 +68,7 @@ paddlex.datasets.SegDataset(data_dir, file_list, label_list, transforms=None, nu 读取语分分割任务数据集,并对样本进行相应的处理。语义分割任务数据集格式的介绍可查看文档:[数据集格式说明](../datasets.md) -示例:[代码文件](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/segmentation/unet.py#L27) +示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/segmentation/unet.py#L27) ### 参数 diff --git a/docs/apis/slim.md b/docs/apis/slim.md index 800e123..bb064ad 100644 --- a/docs/apis/slim.md +++ b/docs/apis/slim.md @@ -8,7 +8,7 @@ paddlex.slim.cal_params_sensetives(model, save_file, eval_dataset, batch_size=8) 1. 
获取模型中可裁剪卷积Kernel的名称。 2. 计算每个可裁剪卷积Kernel不同裁剪率下的敏感度。 【注意】卷积的敏感度是指在不同裁剪率下评估数据集预测精度的损失,通过得到的敏感度,可以决定最终模型需要裁剪的参数列表和各裁剪参数对应的裁剪率。 -[查看使用示例](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/compress/classification/cal_sensitivities_file.py#L33) +[查看使用示例](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/compress/classification/cal_sensitivities_file.py#L33) ### 参数 diff --git a/docs/apis/transforms/cls_transforms.md b/docs/apis/transforms/cls_transforms.md index c6c0cd8..f4ec32c 100644 --- a/docs/apis/transforms/cls_transforms.md +++ b/docs/apis/transforms/cls_transforms.md @@ -7,7 +7,7 @@ paddlex.cls.transforms.Compose(transforms) ``` -根据数据预处理/增强算子对输入数据进行操作。 [使用示例](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/classification/mobilenetv2.py#L13) +根据数据预处理/增强算子对输入数据进行操作。 [使用示例](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/classification/mobilenetv2.py#L13) ### 参数 * **transforms** (list): 数据预处理/数据增强列表。 diff --git a/docs/apis/transforms/det_transforms.md b/docs/apis/transforms/det_transforms.md index 9565415..7d059f9 100644 --- a/docs/apis/transforms/det_transforms.md +++ b/docs/apis/transforms/det_transforms.md @@ -7,7 +7,7 @@ paddlex.det.transforms.Compose(transforms) ``` -根据数据预处理/增强算子对输入数据进行操作。[使用示例](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/detection/yolov3_mobilenetv1.py#L13) +根据数据预处理/增强算子对输入数据进行操作。[使用示例](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/detection/yolov3_mobilenetv1.py#L13) ### 参数 * **transforms** (list): 数据预处理/数据增强列表。 diff --git a/docs/apis/transforms/seg_transforms.md b/docs/apis/transforms/seg_transforms.md index d2b4d92..318026e 100644 --- a/docs/apis/transforms/seg_transforms.md +++ b/docs/apis/transforms/seg_transforms.md @@ -7,7 +7,7 @@ ```python paddlex.seg.transforms.Compose(transforms) ``` -根据数据预处理/数据增强列表对输入数据进行操作。[使用示例](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/segmentation/unet.py#L13) +根据数据预处理/数据增强列表对输入数据进行操作。[使用示例](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/segmentation/unet.py#L13) ### 参数 * **transforms** (list): 数据预处理/数据增强列表。 diff --git a/docs/conf.py b/docs/conf.py index 5aa88d0..a5981ea 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,39 +10,44 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) +import os +import recommonmark -import sphinx_rtd_theme -html_theme = "sphinx_rtd_theme" -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # -- Project information ----------------------------------------------------- project = 'PaddleX' -copyright = '2020, paddlex@baidu.com' -author = 'paddlex@baidu.com' - -# The full version, including alpha/beta/rc tags -release = '0.1.0' - -from recommonmark.parser import CommonMarkParser -source_parsers = { - '.md': CommonMarkParser, -} -source_suffix = ['.rst', '.md'] +copyright = '2020, paddlepaddle' +author = 'paddlepaddle' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
-extensions = ['sphinx_markdown_tables'] + +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.napoleon', + 'sphinx.ext.coverage', + 'sphinx.ext.viewcode', + 'sphinx.ext.mathjax', + 'sphinx.ext.githubpages', + 'sphinx.ext.napoleon', + 'recommonmark', + 'sphinx_markdown_tables', +] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] +# md file can also be parased +source_suffix = ['.rst', '.md'] + +# The master toctree document. +master_doc = 'index' + # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # @@ -50,20 +55,26 @@ templates_path = ['_templates'] # Usually you set "language" from the command line for these cases. language = 'zh_CN' -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] - # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -# -#html_theme = 'alabaster' + +# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org +on_rtd = os.environ.get('READTHEDOCS', None) == 'True' + +if not on_rtd: # only import and set the theme if we're building docs locally + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +# otherwise, readthedocs.org uses their theme by default, so no need to specify it # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
+ html_static_path = ['_static'] -html_logo = 'images/paddlex.png' + +html_logo = '../images/logo.png' + diff --git a/docs/how_to_offline_run.md b/docs/how_to_offline_run.md new file mode 100644 index 0000000..73e2206 --- /dev/null +++ b/docs/how_to_offline_run.md @@ -0,0 +1,39 @@ +# 无联网模型训练 + +PaddleX在模型训练时,存在以下两种情况需要进行联网下载 +> 1.训练模型时,用户没有配置自定义的预训练模型权重`pretrain_weights`,此时PaddleX会自动联网下载在标准数据集上的预训练模型; +> 2.模型裁剪训练时,用户没有配置自定义的参数敏感度信息文件`sensitivities_file`,并将`sensitivities_file`配置成了'DEFAULT'字符串,此时PaddleX会自动联网下载模型在标准数据集上计算得到的参数敏感度信息文件。 + + +## 如何在没联网的情况下进行模型训练 +> 在训练模型时,不管是正常训练还是裁剪训练,用户可以提前准备好预训练权重或参数敏感度信息文档,只需自定义`pretrain_weights`或`sensitivities_file`, 将其设为本地的路径即可。 + + +## 预训练模型下载地址 +> 以下模型均为分类模型权重(UNet除外),用户在训练模型时,需要**根据分类模型的种类或backbone的种类**,选择对应的模型权重进行下载(目标检测在使用ResNet50作为Backbone时,使用下面表格中的ResNet50_cos作为预训练模型) + +| 模型(点击下载) | 数据集 | +| :------------|:------| +| [ResNet18](https://paddle-imagenet-models-name.bj.bcebos.com/ResNet18_pretrained.tar) | ImageNet | +| [ResNet34](https://paddle-imagenet-models-name.bj.bcebos.com/ResNet34_pretrained.tar) | ImageNet | +| [ResNet50](http://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_pretrained.tar) | ImageNet | +| [ResNet101](http://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_pretrained.tar) | ImageNet | +| [ResNet50_vd](https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar) | ImageNet | +| [ResNet101_vd](https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_vd_pretrained.tar) | ImageNet | +| [MobileNetV1](http://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV1_pretrained.tar) | ImageNet | +| [MobileNetV2_x1.0](https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_pretrained.tar) | ImageNet | +| [MobileNetV2_x0.5](https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_x0_5_pretrained.tar) | ImageNet | +| [MobileNetV2_x2.0](https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_x2_0_pretrained.tar) | ImageNet | +| [MobileNetV2_x0.25](https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_x0_25_pretrained.tar) | ImageNet | +| [MobileNetV2_x1.5](https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_x1_5_pretrained.tar) | ImageNet | +| [MobileNetV3_small](https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_small_x1_0_pretrained.tar) | ImageNet | +| [MobileNetV3_large](https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_large_x1_0_pretrained.tar) | ImageNet | +| [DarkNet53](https://paddle-imagenet-models-name.bj.bcebos.com/DarkNet53_ImageNet1k_pretrained.tar) | ImageNet | +| [DenseNet121](https://paddle-imagenet-models-name.bj.bcebos.com/DenseNet121_pretrained.tar) | ImageNet | +| [DenseNet161](https://paddle-imagenet-models-name.bj.bcebos.com/DenseNet161_pretrained.tar) | ImageNet | +| [DenseNet201](https://paddle-imagenet-models-name.bj.bcebos.com/DenseNet201_pretrained.tar) | ImageNet | +| [ResNet50_cos](https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_cos_pretrained.tar) | ImageNet | +| [Xception41](https://paddle-imagenet-models-name.bj.bcebos.com/Xception41_deeplab_pretrained.tar) | ImageNet | +| [Xception65](https://paddle-imagenet-models-name.bj.bcebos.com/Xception65_deeplab_pretrained.tar) | ImageNet | +| [ShuffleNetV2](https://paddle-imagenet-models-name.bj.bcebos.com/ShuffleNetV2_pretrained.tar) | ImageNet | +| [UNet](https://paddleseg.bj.bcebos.com/models/unet_coco_v3.tgz) | MSCOCO | diff --git a/docs/quick_start.md b/docs/quick_start.md index dfbfe12..4793326 100644 --- a/docs/quick_start.md +++ b/docs/quick_start.md @@ -1,6 +1,6 
@@ # 10分钟快速上手使用
-本文档在一个小数据集上展示了如何通过PaddleX进行训练,您可以阅读文档[使用教程-模型训练](/tutorials/train)来了解更多模型任务的训练使用方式。
+本文档在一个小数据集上展示了如何通过PaddleX进行训练,您可以阅读PaddleX的**使用教程**来了解更多模型任务的训练使用方式。
 ## 1. 准备蔬菜分类数据集
 ```
@@ -88,4 +88,5 @@ Predict Result: Predict Result: [{'score': 0.9999393, 'category': 'bocai', 'cate
 ## 其它推荐
 - 1.[目标检测模型训练](tutorials/train/detection.md)
 - 2.[语义分割模型训练](tutorials/train/segmentation.md)
+- 3.[实例分割模型训练](tutorials/train/instance_segmentation.md)
-- 3.[模型太大,想要更小的模型,试试模型裁剪吧!](tutorials/compress/classification.md)
+- 4.[模型太大,想要更小的模型,试试模型裁剪吧!](tutorials/compress/classification.md)
diff --git a/docs/tutorials/compress/classification.md b/docs/tutorials/compress/classification.md
index 464881f..9ab1197 100644
--- a/docs/tutorials/compress/classification.md
+++ b/docs/tutorials/compress/classification.md
@@ -1,17 +1,17 @@
 # 分类模型裁剪
 ---
-本文档训练代码可直接在PaddleX的Repo中下载,[代码tutorials/compress/classification](http://gitlab.baidu.com/Paddle/PaddleX/tree/develop/tutorials/compress/classification)
+本文档训练代码可直接在PaddleX的Repo中下载,[代码tutorials/compress/classification](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/compress/classification)
 本文档按如下方式对模型进行了裁剪
 > 第一步:在训练数据集上训练MobileNetV2
 > 第二步:在验证数据集上计算模型中各个参数的敏感度信息
 > 第三步:根据第二步计算的敏感度,设定`eval_metric_loss`,对模型裁剪后重新在训练数据集上训练
 ## 步骤一 训练MobileNetV2
-> 模型训练使用文档可以直接参考[分类模型训练](../train/classification.md),本文档在该代码基础上添加了部分参数选项,用户可直接下载模型训练代码[tutorials/compress/classification/mobilenet.py](http://gitlab.baidu.com/Paddle/PaddleX/tree/develop/tutorials/compress/classification/mobilenet.py)
+> 模型训练使用文档可以直接参考[分类模型训练](../train/classification.md),本文档在该代码基础上添加了部分参数选项,用户可直接下载模型训练代码[tutorials/compress/classification/mobilenetv2.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/compress/classification/mobilenetv2.py)
 > 使用如下命令开始模型训练
 ```
-python mobilenet.py
+python mobilenetv2.py
 ```
 ## 步骤二 计算参数敏感度
@@ -23,7 +23,7 @@
 import os
 os.environ['CUDA_VISIBLE_DEVICES'] = '0'
 import paddlex as pdx
-model_dir = './output/mobilenet/best_model'
+model_dir = './output/mobilenetv2/best_model'
 model = pdx.load_model(model_dir)
 # 定义验证所用的数据集
@@ -38,16 +38,16 @@
 pdx.slim.cal_params_sensitivities(model,
 eval_dataset,
 batch_size=8)
 ```
-> 本步骤代码已整理至[tutorials/compress/classification/cal_sensitivities_file.py](http://gitlab.baidu.com/Paddle/PaddleX/tree/develop/tutorials/compress/classification/cal_sensitivities_file.py),用户可直接下载使用
+> 本步骤代码已整理至[tutorials/compress/classification/cal_sensitivities_file.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/compress/classification/cal_sensitivities_file.py),用户可直接下载使用
 > 使用如下命令开始计算敏感度
 ```
-python cal_sensitivities_file.py --model_dir output/mobilenet/best_model --dataset vegetables_cls --save_file sensitivities.data
+python cal_sensitivities_file.py --model_dir output/mobilenetv2/best_model --dataset vegetables_cls --save_file sensitivities.data
 ```
 ## 步骤三 开始裁剪训练
 > 本步骤代码与步骤一使用同一份代码文件,使用如下命令开始裁剪训练
 ```
-python mobilenet.py --model_dir output/mobilenet/best_model --sensitivities_file sensitivities.data --eval_metric_loss 0.10
+python mobilenetv2.py --model_dir output/mobilenetv2/best_model --sensitivities_file sensitivities.data --eval_metric_loss 0.10
 ```
 ## 实验效果
diff --git a/docs/tutorials/compress/detection.md b/docs/tutorials/compress/detection.md
index fb06870..514e19b 100644
--- a/docs/tutorials/compress/detection.md
+++ b/docs/tutorials/compress/detection.md
@@ -1,14 +1,14 @@
 # 检测模型裁剪
 ---
-本文档训练代码可直接在PaddleX的Repo中下载,[代码tutorials/compress/detection](http://gitlab.baidu.com/Paddle/PaddleX/tree/develop/tutorials/compress/detection)
+本文档训练代码可直接在PaddleX的Repo中下载,[代码tutorials/compress/detection](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/compress/detection)
 本文档按如下方式对模型进行了裁剪
 > 第一步:在训练数据集上训练YOLOv3
 > 第二步:在验证数据集上计算模型中各个参数的敏感度信息
 > 第三步:根据第二步计算的敏感度,设定`eval_metric_loss`,对模型裁剪后重新在训练数据集上训练
 ## 步骤一 训练YOLOv3
-> 模型训练使用文档可以直接参考[检测模型训练](../train/detection.md),本文档在该代码基础上添加了部分参数选项,用户可直接下载模型训练代码[tutorials/compress/detection/yolov3_mobilnet.py](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop_details/tutorials/compress/detection/yolov3_mobilenet.py)
+> 模型训练使用文档可以直接参考[检测模型训练](../train/detection.md),本文档在该代码基础上添加了部分参数选项,用户可直接下载模型训练代码[tutorials/compress/detection/yolov3_mobilenet.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/compress/detection/yolov3_mobilenet.py)
 > 使用如下命令开始模型训练
 ```
 python yolov3_mobilenet.py
 ```
@@ -37,7 +37,7 @@
 pdx.slim.cal_params_sensitivities(model,
 eval_dataset,
 batch_size=8)
 ```
-> 本步骤代码已整理至[tutorials/compress/detection/cal_sensitivities_file.py](http://gitlab.baidu.com/Paddle/PaddleX/tree/develop/tutorials/compress/detection/cal_sensitivities_file.py),用户可直接下载使用
+> 本步骤代码已整理至[tutorials/compress/detection/cal_sensitivities_file.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/compress/detection/cal_sensitivities_file.py),用户可直接下载使用
 > 使用如下命令开始计算敏感度
 ```
 python cal_sensitivities_file.py --model_dir output/yolov3_mobile/best_model --dataset insect_det --save_file sensitivities.data
diff --git a/docs/tutorials/compress/segmentation.md b/docs/tutorials/compress/segmentation.md
index a3a7548..c19a14b 100644
--- a/docs/tutorials/compress/segmentation.md
+++ b/docs/tutorials/compress/segmentation.md
@@ -1,14 +1,14 @@
 # 分割模型裁剪
 ---
-本文档训练代码可直接在PaddleX的Repo中下载,[代码tutorials/compress/segmentation](http://gitlab.baidu.com/Paddle/PaddleX/tree/develop/tutorials/compress/segmentation)
+本文档训练代码可直接在PaddleX的Repo中下载,[代码tutorials/compress/segmentation](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/compress/segmentation)
 本文档按如下方式对模型进行了裁剪
 > 第一步:在训练数据集上训练UNet
 > 第二步:在验证数据集上计算模型中各个参数的敏感度信息
 > 第三步:根据第二步计算的敏感度,设定`eval_metric_loss`,对模型裁剪后重新在训练数据集上训练
 ## 步骤一 训练UNet
-> 模型训练使用文档可以直接参考[检测模型训练](../train/segmentation.md),本文档在该代码基础上添加了部分参数选项,用户可直接下载模型训练代码[tutorials/compress/segmentation/unet.py](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop_details/tutorials/compress/segmentation/unet.py)
+> 模型训练使用文档可以直接参考[分割模型训练](../train/segmentation.md),本文档在该代码基础上添加了部分参数选项,用户可直接下载模型训练代码[tutorials/compress/segmentation/unet.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/compress/segmentation/unet.py)
 > 使用如下命令开始模型训练
 ```
 python unet.py
 ```
@@ -37,7 +37,7 @@
 pdx.slim.cal_params_sensitivities(model,
 eval_dataset,
 batch_size=8)
 ```
-> 本步骤代码已整理至[tutorials/compress/detection/cal_sensitivities_file.py](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop_details/tutorials/compress/segmentation/cal_sensitivities_file.py),用户可直接下载使用
+> 本步骤代码已整理至[tutorials/compress/segmentation/cal_sensitivities_file.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/compress/segmentation/cal_sensitivities_file.py),用户可直接下载使用
 > 使用如下命令开始计算敏感度
 ```
 python cal_sensitivities_file.py --model_dir output/unet/best_model --dataset optic_disc_seg --save_file sensitivities.data
diff --git a/docs/tutorials/train/classification.md b/docs/tutorials/train/classification.md
index 42a24a2..0fe9860 100644
--- a/docs/tutorials/train/classification.md
+++ b/docs/tutorials/train/classification.md
@@ -1,7 +1,7 @@
 # 训练图像分类模型
 ---
-本文档训练代码可参考PaddleX的[代码tutorial/train/classification/mobilenetv2.py](http://gitlab.baidu.com/Paddle/PaddleX/tree/develop/tutorials/train/classification/mobilenetv2.py) +本文档训练代码可参考PaddleX的[代码tutorial/train/classification/mobilenetv2.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/classification/mobilenetv2.py) **1.下载并解压训练所需的数据集** diff --git a/docs/tutorials/train/detection.md b/docs/tutorials/train/detection.md index 01bf568..edc624b 100644 --- a/docs/tutorials/train/detection.md +++ b/docs/tutorials/train/detection.md @@ -2,7 +2,7 @@ ------ -更多检测模型在VOC数据集或COCO数据集上的训练代码可参考[代码tutorials/train/detection/faster_rcnn_r50_fpn.py](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/detection/faster_rcnn_r50_fpn.py)、[代码tutorials/train/detection/yolov3_mobilenetv1.py](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/detection/yolov3_mobilenetv1.py)。 +更多检测模型在VOC数据集或COCO数据集上的训练代码可参考[代码tutorials/train/detection/faster_rcnn_r50_fpn.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/detection/faster_rcnn_r50_fpn.py)、[代码tutorials/train/detection/yolov3_mobilenetv1.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/detection/yolov3_mobilenetv1.py)。 **1.下载并解压训练所需的数据集** diff --git a/docs/tutorials/train/instance_segmentation.md b/docs/tutorials/train/instance_segmentation.md index 80445c3..7eba292 100644 --- a/docs/tutorials/train/instance_segmentation.md +++ b/docs/tutorials/train/instance_segmentation.md @@ -2,7 +2,7 @@ ------ -本文档训练代码可直接下载[代码tutorials/train/detection/mask_rcnn_r50_fpn.py](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/detection/mask_rcnn_r50_fpn.py)。 +本文档训练代码可直接下载[代码tutorials/train/detection/mask_rcnn_r50_fpn.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/detection/mask_rcnn_r50_fpn.py)。 **1.下载并解压训练所需的数据集** diff --git a/docs/tutorials/train/segmentation.md b/docs/tutorials/train/segmentation.md index 39b295d..a8a48e5 100644 --- a/docs/tutorials/train/segmentation.md +++ b/docs/tutorials/train/segmentation.md @@ -1,7 +1,7 @@ # 训练语义分割模型 --- -更多语义分割模型在Cityscapes数据集上的训练代码可参考[代码tutorials/train/segmentation/unet.py](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/segmentation/unet.py)、[代码tutorials/train/segmentation/deeplabv3p.py](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/segmentation/deeplabv3p.py)。 +更多语义分割模型在Cityscapes数据集上的训练代码可参考[代码tutorials/train/segmentation/unet.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/segmentation/unet.py)、[代码tutorials/train/segmentation/deeplabv3p.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/segmentation/deeplabv3p.py)。 **1.下载并解压训练所需的数据集** @@ -57,7 +57,7 @@ eval_dataset = pdx.datasets.SegDataset( **4.创建DeepLabv3+模型,并进行训练** -> 创建DeepLabv3+模型,`num_classes` 需要设置为不包含背景类的类别数,即: 目标类别数量(1),详细代码可参见[demo](http://gitlab.baidu.com/Paddle/PaddleX/blob/develop/tutorials/train/segmentation/deeplabv3p.py#L44)。 +> 创建DeepLabv3+模型,`num_classes` 需要设置为不包含背景类的类别数,即: 目标类别数量(1),详细代码可参见[demo](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/segmentation/deeplabv3p.py#L44)。 ```python num_classes = num_classes diff --git a/paddlex/cv/models/slim/prune.py b/paddlex/cv/models/slim/prune.py index 1a57c1d..5a8c518 100644 --- a/paddlex/cv/models/slim/prune.py +++ b/paddlex/cv/models/slim/prune.py @@ -73,7 +73,7 @@ def sensitivity(program, logging.info( "Total evaluate iters={}, current={}, progress={}, eta={}". 
             format(
-                total_evaluate_iters, current_iter, progress,
+                total_evaluate_iters, current_iter+1, progress,
                 seconds_to_hms(
                     int(cost * (total_evaluate_iters - current_iter)))),
             use_color=True)
diff --git a/paddlex/utils/logging.py b/paddlex/utils/logging.py
index 42aa3e4..2cb3364 100644
--- a/paddlex/utils/logging.py
+++ b/paddlex/utils/logging.py
@@ -19,6 +19,7 @@ import colorama
 from colorama import init
 import paddlex
+init(autoreset=True)
 levels = {0: 'ERROR', 1: 'WARNING', 2: 'INFO', 3: 'DEBUG'}
@@ -28,7 +29,6 @@ def log(level=2, message="", use_color=False):
     current_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
     if paddlex.log_level >= level:
         if use_color:
-            init(autoreset=True)
             print("\033[1;31;40m{} [{}]\t{}\033[0m".format(
                 current_time, levels[level], message).encode("utf-8").decode("latin1"))
diff --git a/tutorials/compress/README.md b/tutorials/compress/README.md
index 5b7470d..5bfcbd4 100644
--- a/tutorials/compress/README.md
+++ b/tutorials/compress/README.md
@@ -20,14 +20,14 @@ PaddleX提供了两种裁剪训练方式,
 1. 第1种方法,用户自行计算裁剪配置
 ```
 # 训练模型
-python classification/mobilenet.py
+python classification/mobilenetv2.py
 # 计算模型参数敏感度
 python classification/cal_sensitivities_file.py --model_dir=output/mobilenetv2/epoch_10 --save_file=./sensitivities.data
 # 裁剪训练
-python classification/mobilenet.py --model_dir=output/mobilenetv2/epoch_10 --sensetive_file=./sensitivities.data --eval_metric_loss=0.05
+python classification/mobilenetv2.py --model_dir=output/mobilenetv2/epoch_10 --sensitivities_file=./sensitivities.data --eval_metric_loss=0.05
 ```
 2. 第2种方法,使用PaddleX预先计算好的参数敏感度文件
 ```
 # 自动下载PaddleX预先在ImageNet上计算好的参数敏感度信息文件
-python classification/mobilenet.py --sensitivities_file=DEFAULT --eval_metric_loss=0.05
+python classification/mobilenetv2.py --sensitivities_file=DEFAULT --eval_metric_loss=0.05
 ```
diff --git a/tutorials/compress/classification/mobilenet.py b/tutorials/compress/classification/mobilenetv2.py
similarity index 97%
rename from tutorials/compress/classification/mobilenet.py
rename to tutorials/compress/classification/mobilenetv2.py
index 4c4acb6..7ab2a60 100644
--- a/tutorials/compress/classification/mobilenet.py
+++ b/tutorials/compress/classification/mobilenetv2.py
@@ -63,14 +63,14 @@ def train(model_dir=None, sensitivities_file=None, eval_metric_loss=0.05):
             model_dir)
         pretrain_weights = model_dir
-    save_dir = './output/mobilenet'
+    save_dir = './output/mobilenetv2'
     if sensitivities_file is not None:
         # DEFAULT 指使用模型预置的参数敏感度信息作为裁剪依据
         if sensitivities_file != "DEFAULT":
             assert os.path.exists(
                 sensitivities_file), "Path {} not exist".format(
                     sensitivities_file)
-        save_dir = './output/mobilenet_prune'
+        save_dir = './output/mobilenetv2_prune'
     model.train(
         num_epochs=10,
-- 
GitLab