From 25d5b693b67757a8f5e5f0c8cf3532b141b04aab Mon Sep 17 00:00:00 2001 From: jiangjiajun Date: Sun, 12 Jul 2020 10:12:48 +0000 Subject: [PATCH] update tutorials --- docs/apis/datasets.md | 16 ++++++++-------- docs/quick_start.md | 16 +++++++++------- tutorials/train/image_classification/alexnet.py | 12 +++++++++++- .../train/image_classification/mobilenetv2.py | 9 +++++++++ .../mobilenetv3_small_ssld.py | 9 +++++++++ .../image_classification/resnet50_vd_ssld.py | 9 +++++++++ .../train/image_classification/shufflenetv2.py | 9 +++++++++ .../instance_segmentation/mask_rcnn_hrnet_fpn.py | 10 +++++++++- .../instance_segmentation/mask_rcnn_r18_fpn.py | 10 +++++++++- .../instance_segmentation/mask_rcnn_r50_fpn.py | 10 +++++++++- .../object_detection/faster_rcnn_hrnet_fpn.py | 10 +++++++++- .../object_detection/faster_rcnn_r18_fpn.py | 12 ++++++++++++ .../object_detection/faster_rcnn_r50_fpn.py | 12 ++++++++++++ .../train/object_detection/yolov3_darknet53.py | 11 +++++++++++ .../train/object_detection/yolov3_mobilenetv1.py | 11 +++++++++++ .../train/object_detection/yolov3_mobilenetv3.py | 11 +++++++++++ .../deeplabv3p_mobilenetv2.py | 11 ++++++++++- .../deeplabv3p_mobilenetv2_x0.25.py | 10 +++++++++- .../deeplabv3p_xception65.py | 10 +++++++++- .../train/semantic_segmentation/fast_scnn.py | 13 +++++++++---- tutorials/train/semantic_segmentation/hrnet.py | 10 +++++++++- tutorials/train/semantic_segmentation/unet.py | 10 +++++++++- 22 files changed, 212 insertions(+), 29 deletions(-) diff --git a/docs/apis/datasets.md b/docs/apis/datasets.md index ae3595b..3494aaa 100644 --- a/docs/apis/datasets.md +++ b/docs/apis/datasets.md @@ -7,7 +7,7 @@ paddlex.datasets.ImageNet(data_dir, file_list, label_list, transforms=None, num_ ``` 读取ImageNet格式的分类数据集,并对样本进行相应的处理。ImageNet数据集格式的介绍可查看文档:[数据集格式说明](../data/format/index.html) -示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/classification/mobilenetv2.py#L25) +示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/image_classification/mobilenetv2.py) > **参数** @@ -20,15 +20,15 @@ paddlex.datasets.ImageNet(data_dir, file_list, label_list, transforms=None, num_ > > * **parallel_method** (str): 数据集中样本在预处理过程中并行处理的方式,支持'thread'线程和'process'进程两种方式。默认为'process'(Windows和Mac下会强制使用thread,该参数无效)。 > > * **shuffle** (bool): 是否需要对数据集中样本打乱顺序。默认为False。 -## paddlex.datasets.PascalVOC +## paddlex.datasets.VOCDetection > **用于目标检测模型** ``` -paddlex.datasets.PascalVOC(data_dir, file_list, label_list, transforms=None, num_workers=‘auto’, buffer_size=100, parallel_method='thread', shuffle=False) +paddlex.datasets.VOCDetection(data_dir, file_list, label_list, transforms=None, num_workers=‘auto’, buffer_size=100, parallel_method='thread', shuffle=False) ``` > 读取PascalVOC格式的检测数据集,并对样本进行相应的处理。PascalVOC数据集格式的介绍可查看文档:[数据集格式说明](../data/format/index.html) -> 示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/detection/yolov3_darknet53.py#L29) +> 示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/object_detection/yolov3_darknet53.py) > **参数** @@ -41,15 +41,15 @@ paddlex.datasets.PascalVOC(data_dir, file_list, label_list, transforms=None, num > > * **parallel_method** (str): 数据集中样本在预处理过程中并行处理的方式,支持'thread'线程和'process'进程两种方式。默认为'process'(Windows和Mac下会强制使用thread,该参数无效)。 > > * **shuffle** (bool): 是否需要对数据集中样本打乱顺序。默认为False。 -## paddlex.datasets.MSCOCO +## paddlex.datasets.CocoDetection > **用于实例分割/目标检测模型** ``` -paddlex.datasets.MSCOCO(data_dir, ann_file, transforms=None, num_workers='auto', 
buffer_size=100, parallel_method='thread', shuffle=False) +paddlex.datasets.CocoDetection(data_dir, ann_file, transforms=None, num_workers='auto', buffer_size=100, parallel_method='thread', shuffle=False) ``` > 读取MSCOCO格式的检测数据集,并对样本进行相应的处理,该格式的数据集同样可以应用到实例分割模型的训练中。MSCOCO数据集格式的介绍可查看文档:[数据集格式说明](../data/format/index.html) -> 示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/detection/mask_rcnn_r50_fpn.py#L27) +> 示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/instance_segmentation/mask_rcnn_r50_fpn.py) > **参数** @@ -69,7 +69,7 @@ paddlex.datasets.SegDataset(data_dir, file_list, label_list, transforms=None, nu > 读取语义分割任务数据集,并对样本进行相应的处理。语义分割任务数据集格式的介绍可查看文档:[数据集格式说明](../data/format/index.html) -> 示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/segmentation/unet.py#L27) +> 示例:[代码文件](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/semantic_segmentation/unet.py) > **参数** diff --git a/docs/quick_start.md b/docs/quick_start.md index 760b15b..a91df68 100644 --- a/docs/quick_start.md +++ b/docs/quick_start.md @@ -1,6 +1,8 @@ # 10分钟快速上手使用 -本文档在一个小数据集上展示了如何通过PaddleX进行训练,您可以阅读PaddleX的**使用教程**来了解更多模型任务的训练使用方式。本示例同步在AIStudio上,可直接[在线体验模型训练](https://aistudio.baidu.com/aistudio/projectdetail/439860) +本文档在一个小数据集上展示了如何通过PaddleX进行训练。本示例同步在AIStudio上,可直接[在线体验模型训练](https://aistudio.baidu.com/aistudio/projectdetail/450220)。 + +本示例代码源于Github [tutorials/train/classification/mobilenetv3_small_ssld.py](https://github.com/PaddlePaddle/PaddleX/blob/develop/tutorials/train/image_classification/mobilenetv3_small_ssld.py),用户可自行下载至本地运行。 PaddleX中的所有模型训练跟随以下3个步骤,即可快速完成训练代码开发! @@ -35,7 +37,7 @@ tar xzvf vegetables_cls.tar.gz **3. 定义训练/验证图像处理流程transforms** -由于训练时数据增强操作的加入,因此模型在训练和验证过程中,数据处理流程需要分别进行定义。如下所示,代码在`train_transforms`中加入了[RandomCrop](apis/transforms/cls_transforms.html#RandomCrop)和[RandomHorizontalFlip](apis/transforms/cls_transforms.html#RandomHorizontalFlip)两种数据增强方式, 更多方法可以参考[数据增强文档](apis/transforms/augment.md)。 +由于训练时数据增强操作的加入,因此模型在训练和验证过程中,数据处理流程需要分别进行定义。如下所示,代码在`train_transforms`中加入了[RandomCrop](apis/transforms/cls_transforms.html#randomcrop)和[RandomHorizontalFlip](apis/transforms/cls_transforms.html#randomhorizontalflip)两种数据增强方式, 更多方法可以参考[数据增强文档](apis/transforms/augment.md)。 ``` from paddlex.cls import transforms train_transforms = transforms.Compose([ @@ -54,7 +56,7 @@ eval_transforms = transforms.Compose([ **4. 
定义`dataset`加载图像分类数据集** 定义数据集,`pdx.datasets.ImageNet`表示读取ImageNet格式的分类数据集 -- [paddlex.datasets.ImageNet接口说明](apis/datasets/classification.md) +- [paddlex.datasets.ImageNet接口说明](apis/datasets.md) - [ImageNet数据格式说明](data/format/classification.md) ``` @@ -118,7 +120,7 @@ Predict Result: Predict Result: [{'score': 0.9999393, 'category': 'bocai', 'cate **更多使用教程** -- 1.[目标检测模型训练](tutorials/train/detection.md) -- 2.[语义分割模型训练](tutorials/train/segmentation.md) -- 3.[实例分割模型训练](tutorials/train/instance_segmentation.md) -- 4.[模型太大,想要更小的模型,试试模型裁剪吧!](tutorials/compress/classification.md) +- 1.[目标检测模型训练](train/object_detection.md) +- 2.[语义分割模型训练](train/semantic_segmentation.md) +- 3.[实例分割模型训练](train/instance_segmentation.md) +- 4.[模型太大,想要更小的模型,试试模型裁剪吧!](https://github.com/PaddlePaddle/PaddleX/tree/develop/tutorials/compress) diff --git a/tutorials/train/image_classification/alexnet.py b/tutorials/train/image_classification/alexnet.py index b78f45b..bec0669 100644 --- a/tutorials/train/image_classification/alexnet.py +++ b/tutorials/train/image_classification/alexnet.py @@ -1,3 +1,8 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + from paddlex.cls import transforms import paddlex as pdx @@ -6,6 +11,7 @@ veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz' pdx.utils.download_and_decompress(veg_dataset, path='./') # 定义训练和验证时的transforms +# API说明https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/cls_transforms.html train_transforms = transforms.Compose([ transforms.RandomCrop(crop_size=224), transforms.RandomHorizontalFlip(), @@ -18,6 +24,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-imagenet train_dataset = pdx.datasets.ImageNet( data_dir='vegetables_cls', file_list='vegetables_cls/train_list.txt', @@ -33,11 +40,14 @@ eval_dataset = pdx.datasets.ImageNet( # 初始化模型,并进行训练 # 可使用VisualDL查看训练指标 # VisualDL启动方式: visualdl --logdir output/mobilenetv2/vdl_log --port 8001 -# 浏览器打开 https://0.0.0.0:8001即可 +# 浏览器打开 https://0.0.0.0:8001或https://localhost:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP model = pdx.cls.AlexNet(num_classes=len(train_dataset.labels)) # AlexNet需要指定确定的input_shape model.fixed_input_shape = [224, 224] + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/classification.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=10, train_dataset=train_dataset, diff --git a/tutorials/train/image_classification/mobilenetv2.py b/tutorials/train/image_classification/mobilenetv2.py index 5edbf58..7533aab 100644 --- a/tutorials/train/image_classification/mobilenetv2.py +++ b/tutorials/train/image_classification/mobilenetv2.py @@ -1,4 +1,8 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + from paddlex.cls import transforms import paddlex as pdx @@ -7,6 +11,7 @@ veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz' pdx.utils.download_and_decompress(veg_dataset, path='./') # 定义训练和验证时的transforms +# API说明https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/cls_transforms.html train_transforms = transforms.Compose([ transforms.RandomCrop(crop_size=224), transforms.RandomHorizontalFlip(), @@ -19,6 +24,7 @@ eval_transforms = transforms.Compose([ ]) # 
定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-imagenet train_dataset = pdx.datasets.ImageNet( data_dir='vegetables_cls', file_list='vegetables_cls/train_list.txt', @@ -37,6 +43,9 @@ eval_dataset = pdx.datasets.ImageNet( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP model = pdx.cls.MobileNetV2(num_classes=len(train_dataset.labels)) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/classification.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=10, train_dataset=train_dataset, diff --git a/tutorials/train/image_classification/mobilenetv3_small_ssld.py b/tutorials/train/image_classification/mobilenetv3_small_ssld.py index 6d52775..8f13312 100644 --- a/tutorials/train/image_classification/mobilenetv3_small_ssld.py +++ b/tutorials/train/image_classification/mobilenetv3_small_ssld.py @@ -1,4 +1,8 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + from paddlex.cls import transforms import paddlex as pdx @@ -7,6 +11,7 @@ veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz' pdx.utils.download_and_decompress(veg_dataset, path='./') # 定义训练和验证时的transforms +# API说明https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/cls_transforms.html train_transforms = transforms.Compose([ transforms.RandomCrop(crop_size=224), transforms.RandomHorizontalFlip(), @@ -19,6 +24,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-imagenet train_dataset = pdx.datasets.ImageNet( data_dir='vegetables_cls', file_list='vegetables_cls/train_list.txt', @@ -37,6 +43,9 @@ eval_dataset = pdx.datasets.ImageNet( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP model = pdx.cls.MobileNetV3_small_ssld(num_classes=len(train_dataset.labels)) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-imagenet +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=10, train_dataset=train_dataset, diff --git a/tutorials/train/image_classification/resnet50_vd_ssld.py b/tutorials/train/image_classification/resnet50_vd_ssld.py index ca94f7f..b72ebc5 100644 --- a/tutorials/train/image_classification/resnet50_vd_ssld.py +++ b/tutorials/train/image_classification/resnet50_vd_ssld.py @@ -1,4 +1,8 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + from paddlex.cls import transforms import paddlex as pdx @@ -7,6 +11,7 @@ veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz' pdx.utils.download_and_decompress(veg_dataset, path='./') # 定义训练和验证时的transforms +# API说明https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/cls_transforms.html train_transforms = transforms.Compose([ transforms.RandomCrop(crop_size=224), transforms.RandomHorizontalFlip(), @@ -19,6 +24,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-imagenet train_dataset = pdx.datasets.ImageNet( data_dir='vegetables_cls', file_list='vegetables_cls/train_list.txt', @@ -37,6 +43,9 @@ eval_dataset = pdx.datasets.ImageNet( # 浏览器打开 https://0.0.0.0:8001即可 # 
其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP model = pdx.cls.ResNet50_vd_ssld(num_classes=len(train_dataset.labels)) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/classification.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=10, train_dataset=train_dataset, diff --git a/tutorials/train/image_classification/shufflenetv2.py b/tutorials/train/image_classification/shufflenetv2.py index 29272df..cdfa188 100644 --- a/tutorials/train/image_classification/shufflenetv2.py +++ b/tutorials/train/image_classification/shufflenetv2.py @@ -1,4 +1,8 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + from paddlex.cls import transforms import paddlex as pdx @@ -7,6 +11,7 @@ veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz' pdx.utils.download_and_decompress(veg_dataset, path='./') # 定义训练和验证时的transforms +# API说明https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/cls_transforms.html train_transforms = transforms.Compose([ transforms.RandomCrop(crop_size=224), transforms.RandomHorizontalFlip(), @@ -19,6 +24,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-imagenet train_dataset = pdx.datasets.ImageNet( data_dir='vegetables_cls', file_list='vegetables_cls/train_list.txt', @@ -37,6 +43,9 @@ eval_dataset = pdx.datasets.ImageNet( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP model = pdx.cls.ShuffleNetV2(num_classes=len(train_dataset.labels)) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/classification.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=10, train_dataset=train_dataset, diff --git a/tutorials/train/instance_segmentation/mask_rcnn_hrnet_fpn.py b/tutorials/train/instance_segmentation/mask_rcnn_hrnet_fpn.py index 7f8e1eb..f784465 100644 --- a/tutorials/train/instance_segmentation/mask_rcnn_hrnet_fpn.py +++ b/tutorials/train/instance_segmentation/mask_rcnn_hrnet_fpn.py @@ -1,5 +1,6 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os -# 选择使用0号卡 os.environ['CUDA_VISIBLE_DEVICES'] = '0' from paddlex.det import transforms @@ -10,6 +11,7 @@ xiaoduxiong_dataset = 'https://bj.bcebos.com/paddlex/datasets/xiaoduxiong_ins_de pdx.utils.download_and_decompress(xiaoduxiong_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.Normalize(), @@ -24,6 +26,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-cocodetection train_dataset = pdx.datasets.CocoDetection( data_dir='xiaoduxiong_ins_det/JPEGImages', ann_file='xiaoduxiong_ins_det/train.json', @@ -41,7 +44,12 @@ eval_dataset = pdx.datasets.CocoDetection( # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP # num_classes 需要设置为包含背景类的类别数,即: 目标类别数量 + 1 num_classes = len(train_dataset.labels) + 1 + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/instance_segmentation.html#maskrcnn model = pdx.det.MaskRCNN(num_classes=num_classes, backbone='HRNet_W18') + +# 
API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/instance_segmentation.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=12, train_dataset=train_dataset, diff --git a/tutorials/train/instance_segmentation/mask_rcnn_r18_fpn.py b/tutorials/train/instance_segmentation/mask_rcnn_r18_fpn.py index a410515..dc16b66 100644 --- a/tutorials/train/instance_segmentation/mask_rcnn_r18_fpn.py +++ b/tutorials/train/instance_segmentation/mask_rcnn_r18_fpn.py @@ -1,5 +1,6 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os -# 选择使用0号卡 os.environ['CUDA_VISIBLE_DEVICES'] = '0' from paddlex.det import transforms @@ -10,6 +11,7 @@ xiaoduxiong_dataset = 'https://bj.bcebos.com/paddlex/datasets/xiaoduxiong_ins_de pdx.utils.download_and_decompress(xiaoduxiong_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.Normalize(), @@ -24,6 +26,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-cocodetection train_dataset = pdx.datasets.CocoDetection( data_dir='xiaoduxiong_ins_det/JPEGImages', ann_file='xiaoduxiong_ins_det/train.json', @@ -41,7 +44,12 @@ eval_dataset = pdx.datasets.CocoDetection( # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP # num_classes 需要设置为包含背景类的类别数,即: 目标类别数量 + 1 num_classes = len(train_dataset.labels) + 1 + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/instance_segmentation.html#maskrcnn model = pdx.det.MaskRCNN(num_classes=num_classes, backbone='ResNet18') + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/instance_segmentation.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=12, train_dataset=train_dataset, diff --git a/tutorials/train/instance_segmentation/mask_rcnn_r50_fpn.py b/tutorials/train/instance_segmentation/mask_rcnn_r50_fpn.py index 8174998..e87c88e 100644 --- a/tutorials/train/instance_segmentation/mask_rcnn_r50_fpn.py +++ b/tutorials/train/instance_segmentation/mask_rcnn_r50_fpn.py @@ -1,5 +1,6 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os -# 选择使用0号卡 os.environ['CUDA_VISIBLE_DEVICES'] = '0' from paddlex.det import transforms @@ -10,6 +11,7 @@ xiaoduxiong_dataset = 'https://bj.bcebos.com/paddlex/datasets/xiaoduxiong_ins_de pdx.utils.download_and_decompress(xiaoduxiong_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.Normalize(), @@ -24,6 +26,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-cocodetection train_dataset = pdx.datasets.CocoDetection( data_dir='xiaoduxiong_ins_det/JPEGImages', ann_file='xiaoduxiong_ins_det/train.json', @@ -41,7 +44,12 @@ eval_dataset = pdx.datasets.CocoDetection( # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP # num_classes 需要设置为包含背景类的类别数,即: 目标类别数量 + 1 num_classes = len(train_dataset.labels) + 1 + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/instance_segmentation.html#maskrcnn model = 
pdx.det.MaskRCNN(num_classes=num_classes, backbone='ResNet50') + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/instance_segmentation.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=12, train_dataset=train_dataset, diff --git a/tutorials/train/object_detection/faster_rcnn_hrnet_fpn.py b/tutorials/train/object_detection/faster_rcnn_hrnet_fpn.py index 3d1650f..e46d3ae 100644 --- a/tutorials/train/object_detection/faster_rcnn_hrnet_fpn.py +++ b/tutorials/train/object_detection/faster_rcnn_hrnet_fpn.py @@ -1,5 +1,6 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os -# 选择使用0号卡 os.environ['CUDA_VISIBLE_DEVICES'] = '0' from paddlex.det import transforms @@ -10,6 +11,7 @@ insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz' pdx.utils.download_and_decompress(insect_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.Normalize(), @@ -24,6 +26,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection train_dataset = pdx.datasets.VOCDetection( data_dir='insect_det', file_list='insect_det/train_list.txt', @@ -43,7 +46,12 @@ eval_dataset = pdx.datasets.VOCDetection( # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP # num_classes 需要设置为包含背景类的类别数,即: 目标类别数量 + 1 num_classes = len(train_dataset.labels) + 1 + +# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-fasterrcnn model = pdx.det.FasterRCNN(num_classes=num_classes, backbone='HRNet_W18') + +# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#id1 +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=12, train_dataset=train_dataset, diff --git a/tutorials/train/object_detection/faster_rcnn_r18_fpn.py b/tutorials/train/object_detection/faster_rcnn_r18_fpn.py index cd8928a..0ae82d3 100644 --- a/tutorials/train/object_detection/faster_rcnn_r18_fpn.py +++ b/tutorials/train/object_detection/faster_rcnn_r18_fpn.py @@ -1,4 +1,8 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + from paddlex.det import transforms import paddlex as pdx @@ -7,6 +11,7 @@ insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz' pdx.utils.download_and_decompress(insect_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.Normalize(), @@ -19,7 +24,9 @@ eval_transforms = transforms.Compose([ transforms.ResizeByShort(short_size=800, max_size=1333), transforms.Padding(coarsest_stride=32), ]) + # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection train_dataset = pdx.datasets.VOCDetection( data_dir='insect_det', file_list='insect_det/train_list.txt', @@ -39,7 +46,12 @@ eval_dataset = pdx.datasets.VOCDetection( # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP # num_classes 需要设置为包含背景类的类别数,即: 目标类别数量 + 1 num_classes = len(train_dataset.labels) + 1 + +# API说明: 
https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-fasterrcnn model = pdx.det.FasterRCNN(num_classes=num_classes, backbone='ResNet18') + +# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#id1 +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=12, train_dataset=train_dataset, diff --git a/tutorials/train/object_detection/faster_rcnn_r50_fpn.py b/tutorials/train/object_detection/faster_rcnn_r50_fpn.py index 350c40f..0f26bfa 100644 --- a/tutorials/train/object_detection/faster_rcnn_r50_fpn.py +++ b/tutorials/train/object_detection/faster_rcnn_r50_fpn.py @@ -1,4 +1,8 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + from paddlex.det import transforms import paddlex as pdx @@ -7,6 +11,7 @@ insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz' pdx.utils.download_and_decompress(insect_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.Normalize(), @@ -19,7 +24,9 @@ eval_transforms = transforms.Compose([ transforms.ResizeByShort(short_size=800, max_size=1333), transforms.Padding(coarsest_stride=32), ]) + # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection train_dataset = pdx.datasets.VOCDetection( data_dir='insect_det', file_list='insect_det/train_list.txt', @@ -39,7 +46,12 @@ eval_dataset = pdx.datasets.VOCDetection( # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP # num_classes 需要设置为包含背景类的类别数,即: 目标类别数量 + 1 num_classes = len(train_dataset.labels) + 1 + +# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-fasterrcnn model = pdx.det.FasterRCNN(num_classes=num_classes, backbone='ResNet50') + +# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#id1 +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=12, train_dataset=train_dataset, diff --git a/tutorials/train/object_detection/yolov3_darknet53.py b/tutorials/train/object_detection/yolov3_darknet53.py index a15e5cb..085be4b 100644 --- a/tutorials/train/object_detection/yolov3_darknet53.py +++ b/tutorials/train/object_detection/yolov3_darknet53.py @@ -1,4 +1,8 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + from paddlex.det import transforms import paddlex as pdx @@ -7,6 +11,7 @@ insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz' pdx.utils.download_and_decompress(insect_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html train_transforms = transforms.Compose([ transforms.MixupImage(mixup_epoch=250), transforms.RandomDistort(), @@ -23,6 +28,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection train_dataset = pdx.datasets.VOCDetection( data_dir='insect_det', file_list='insect_det/train_list.txt', @@ -41,7 +47,12 @@ eval_dataset = pdx.datasets.VOCDetection( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP num_classes = 
len(train_dataset.labels) + +# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3 model = pdx.det.YOLOv3(num_classes=num_classes, backbone='DarkNet53') + +# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=270, train_dataset=train_dataset, diff --git a/tutorials/train/object_detection/yolov3_mobilenetv1.py b/tutorials/train/object_detection/yolov3_mobilenetv1.py index 9b62185..bfc2bea 100644 --- a/tutorials/train/object_detection/yolov3_mobilenetv1.py +++ b/tutorials/train/object_detection/yolov3_mobilenetv1.py @@ -1,4 +1,8 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + from paddlex.det import transforms import paddlex as pdx @@ -7,6 +11,7 @@ insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz' pdx.utils.download_and_decompress(insect_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html train_transforms = transforms.Compose([ transforms.MixupImage(mixup_epoch=250), transforms.RandomDistort(), @@ -23,6 +28,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection train_dataset = pdx.datasets.VOCDetection( data_dir='insect_det', file_list='insect_det/train_list.txt', @@ -41,7 +47,12 @@ eval_dataset = pdx.datasets.VOCDetection( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP num_classes = len(train_dataset.labels) + +# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3 model = pdx.det.YOLOv3(num_classes=num_classes, backbone='MobileNetV1') + +# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=270, train_dataset=train_dataset, diff --git a/tutorials/train/object_detection/yolov3_mobilenetv3.py b/tutorials/train/object_detection/yolov3_mobilenetv3.py index 4eb0609..8557078 100644 --- a/tutorials/train/object_detection/yolov3_mobilenetv3.py +++ b/tutorials/train/object_detection/yolov3_mobilenetv3.py @@ -1,4 +1,8 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + from paddlex.det import transforms import paddlex as pdx @@ -7,6 +11,7 @@ insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz' pdx.utils.download_and_decompress(insect_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html train_transforms = transforms.Compose([ transforms.MixupImage(mixup_epoch=250), transforms.RandomDistort(), @@ -23,6 +28,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection train_dataset = pdx.datasets.VOCDetection( data_dir='insect_det', file_list='insect_det/train_list.txt', @@ -41,7 +47,12 @@ eval_dataset = pdx.datasets.VOCDetection( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP num_classes = len(train_dataset.labels) + +# API说明: 
https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3 model = pdx.det.YOLOv3(num_classes=num_classes, backbone='MobileNetV3_large') + +# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=270, train_dataset=train_dataset, diff --git a/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2.py b/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2.py index 28931a6..fc5b738 100644 --- a/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2.py +++ b/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2.py @@ -1,5 +1,6 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os -# 选择使用0号卡 os.environ['CUDA_VISIBLE_DEVICES'] = '0' import paddlex as pdx @@ -10,6 +11,7 @@ optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz' pdx.utils.download_and_decompress(optic_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/seg_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.ResizeRangeScaling(), @@ -24,6 +26,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-segdataset train_dataset = pdx.datasets.SegDataset( data_dir='optic_disc_seg', file_list='optic_disc_seg/train_list.txt', @@ -42,7 +45,13 @@ eval_dataset = pdx.datasets.SegDataset( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP num_classes = len(train_dataset.labels) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#paddlex-seg-deeplabv3p model = pdx.seg.DeepLabv3p(num_classes=num_classes, backbone='MobileNetV2_x1.0') + + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=40, train_dataset=train_dataset, diff --git a/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2_x0.25.py b/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2_x0.25.py index 04553e3..51bdb3d 100644 --- a/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2_x0.25.py +++ b/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2_x0.25.py @@ -1,5 +1,6 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os -# 选择使用0号卡 os.environ['CUDA_VISIBLE_DEVICES'] = '0' import paddlex as pdx @@ -10,6 +11,7 @@ optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz' pdx.utils.download_and_decompress(optic_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/seg_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.ResizeRangeScaling(), @@ -24,6 +26,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-segdataset train_dataset = pdx.datasets.SegDataset( data_dir='optic_disc_seg', file_list='optic_disc_seg/train_list.txt', @@ -42,7 +45,12 @@ eval_dataset = pdx.datasets.SegDataset( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP num_classes = 
len(train_dataset.labels) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#paddlex-seg-deeplabv3p model = pdx.seg.DeepLabv3p(num_classes=num_classes, backbone='MobileNetV2_x0.25') + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=40, train_dataset=train_dataset, diff --git a/tutorials/train/semantic_segmentation/deeplabv3p_xception65.py b/tutorials/train/semantic_segmentation/deeplabv3p_xception65.py index 70167ff..4101c98 100644 --- a/tutorials/train/semantic_segmentation/deeplabv3p_xception65.py +++ b/tutorials/train/semantic_segmentation/deeplabv3p_xception65.py @@ -1,5 +1,6 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os -# 选择使用0号卡 os.environ['CUDA_VISIBLE_DEVICES'] = '0' import paddlex as pdx @@ -10,6 +11,7 @@ optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz' pdx.utils.download_and_decompress(optic_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/seg_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.ResizeRangeScaling(), @@ -24,6 +26,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-segdataset train_dataset = pdx.datasets.SegDataset( data_dir='optic_disc_seg', file_list='optic_disc_seg/train_list.txt', @@ -42,7 +45,12 @@ eval_dataset = pdx.datasets.SegDataset( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP num_classes = len(train_dataset.labels) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#paddlex-seg-deeplabv3p model = pdx.seg.DeepLabv3p(num_classes=num_classes, backbone='Xception65') + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=40, train_dataset=train_dataset, diff --git a/tutorials/train/semantic_segmentation/fast_scnn.py b/tutorials/train/semantic_segmentation/fast_scnn.py index af041ca..38fa51a 100644 --- a/tutorials/train/semantic_segmentation/fast_scnn.py +++ b/tutorials/train/semantic_segmentation/fast_scnn.py @@ -1,5 +1,6 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os -# 选择使用0号卡 os.environ['CUDA_VISIBLE_DEVICES'] = '0' import paddlex as pdx @@ -10,7 +11,7 @@ optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz' pdx.utils.download_and_decompress(optic_dataset, path='./') # 定义训练和验证时的transforms -# API说明: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/seg_transforms.html#composedsegtransforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/seg_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.ResizeRangeScaling(), @@ -25,7 +26,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 -# API说明: https://paddlex.readthedocs.io/zh_CN/latest/apis/datasets/semantic_segmentation.html#segdataset +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-segdataset train_dataset = pdx.datasets.SegDataset( data_dir='optic_disc_seg', 
file_list='optic_disc_seg/train_list.txt', @@ -44,9 +45,13 @@ eval_dataset = pdx.datasets.SegDataset( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP -# https://paddlex.readthedocs.io/zh_CN/latest/apis/models/semantic_segmentation.html#fastscnn num_classes = len(train_dataset.labels) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#paddlex-seg-fastscnn model = pdx.seg.FastSCNN(num_classes=num_classes) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=20, train_dataset=train_dataset, diff --git a/tutorials/train/semantic_segmentation/hrnet.py b/tutorials/train/semantic_segmentation/hrnet.py index 330a107..9526e99 100644 --- a/tutorials/train/semantic_segmentation/hrnet.py +++ b/tutorials/train/semantic_segmentation/hrnet.py @@ -1,5 +1,6 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os -# 选择使用0号卡 os.environ['CUDA_VISIBLE_DEVICES'] = '0' import paddlex as pdx @@ -10,6 +11,7 @@ optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz' pdx.utils.download_and_decompress(optic_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/seg_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.ResizeRangeScaling(), @@ -24,6 +26,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-segdataset train_dataset = pdx.datasets.SegDataset( data_dir='optic_disc_seg', file_list='optic_disc_seg/train_list.txt', @@ -42,7 +45,12 @@ eval_dataset = pdx.datasets.SegDataset( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP num_classes = len(train_dataset.labels) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#paddlex-seg-hrnet model = pdx.seg.HRNet(num_classes=num_classes) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=20, train_dataset=train_dataset, diff --git a/tutorials/train/semantic_segmentation/unet.py b/tutorials/train/semantic_segmentation/unet.py index 46e93b8..c0ba726 100644 --- a/tutorials/train/semantic_segmentation/unet.py +++ b/tutorials/train/semantic_segmentation/unet.py @@ -1,5 +1,6 @@ +# 环境变量配置,用于控制是否使用GPU +# 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os -# 选择使用0号卡 os.environ['CUDA_VISIBLE_DEVICES'] = '0' import paddlex as pdx @@ -10,6 +11,7 @@ optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz' pdx.utils.download_and_decompress(optic_dataset, path='./') # 定义训练和验证时的transforms +# API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/seg_transforms.html train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.ResizeRangeScaling(), @@ -23,6 +25,7 @@ eval_transforms = transforms.Compose([ ]) # 定义训练和验证所用的数据集 +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-segdataset train_dataset = pdx.datasets.SegDataset( data_dir='optic_disc_seg', file_list='optic_disc_seg/train_list.txt', @@ -41,7 +44,12 @@ eval_dataset = 
pdx.datasets.SegDataset( # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP num_classes = len(train_dataset.labels) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#paddlex-seg-unet model = pdx.seg.UNet(num_classes=num_classes) + +# API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#train +# 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=20, train_dataset=train_dataset, -- GitLab
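Taken together, the updated tutorials add the same three kinds of comments (GPU environment variable, dataset/transforms API links, train() parameter links) around one shared five-step skeleton. Below is a minimal sketch of that skeleton, assembled from the image-classification snippets in this patch; the `label_list` path, the `shuffle` flag, and the `eval_dataset`/`save_dir` arguments to `train()` are reasonable assumptions rather than lines visible in the diff.

```
# Sketch of the shared tutorial skeleton (based on mobilenetv2.py above).

# Environment variable controlling GPU use, as added at the top of every tutorial:
# https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import paddlex as pdx
from paddlex.cls import transforms

# Download and extract the demo classification dataset
veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz'
pdx.utils.download_and_decompress(veg_dataset, path='./')

# Transforms for training and evaluation
# https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/cls_transforms.html
train_transforms = transforms.Compose([
    transforms.RandomCrop(crop_size=224),
    transforms.RandomHorizontalFlip(),
    transforms.Normalize()
])
eval_transforms = transforms.Compose([
    transforms.ResizeByShort(short_size=256),
    transforms.CenterCrop(crop_size=224),
    transforms.Normalize()
])

# Datasets in ImageNet list format
# https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-imagenet
train_dataset = pdx.datasets.ImageNet(
    data_dir='vegetables_cls',
    file_list='vegetables_cls/train_list.txt',
    label_list='vegetables_cls/labels.txt',  # assumed label file name
    transforms=train_transforms,
    shuffle=True)  # documented parameter of paddlex.datasets.ImageNet; defaults to False
eval_dataset = pdx.datasets.ImageNet(
    data_dir='vegetables_cls',
    file_list='vegetables_cls/val_list.txt',
    label_list='vegetables_cls/labels.txt',  # assumed label file name
    transforms=eval_transforms)

# Model definition and training
# https://paddlex.readthedocs.io/zh_CN/develop/apis/models/classification.html#train
model = pdx.cls.MobileNetV2(num_classes=len(train_dataset.labels))
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,      # assumed; only num_epochs/train_dataset appear in the diff context
    save_dir='output/mobilenetv2')  # assumed; matches the VisualDL log path output/mobilenetv2/vdl_log in the comments
```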