From 1d435df9777fc93c38472c7fa0bf1b2d20c5b9f9 Mon Sep 17 00:00:00 2001 From: ceci3 Date: Fri, 8 Jul 2022 10:54:36 +0800 Subject: [PATCH] optimize segmentic readme (#1273) --- .../semantic_segmentation/README.md | 84 ++++++------------- .../configs/deeplabv3/deeplabv3_qat.yaml | 3 + .../configs/hrnet/hrnet_qat.yaml | 3 + .../configs/hrnet/hrnet_sparse.yaml | 3 + .../configs/pp_humanseg/pp_humanseg_auto.yaml | 7 ++ .../configs/pp_humanseg/pp_humanseg_qat.yaml | 6 ++ .../pp_humanseg/pp_humanseg_sparse.yaml | 6 ++ .../configs/pp_liteseg/pp_liteseg_auto.yaml | 6 ++ .../configs/pp_liteseg/pp_liteseg_qat.yaml | 6 ++ .../configs/pp_liteseg/pp_liteseg_sparse.yaml | 6 ++ .../configs/unet/unet_channel_prune.yaml | 3 + .../configs/unet/unet_qat.yaml | 3 + .../semantic_segmentation/run.py | 67 ++++++--------- paddleslim/auto_compression/compressor.py | 6 +- 14 files changed, 104 insertions(+), 105 deletions(-) diff --git a/example/auto_compression/semantic_segmentation/README.md b/example/auto_compression/semantic_segmentation/README.md index 1cf82cb0..adddcc1a 100644 --- a/example/auto_compression/semantic_segmentation/README.md +++ b/example/auto_compression/semantic_segmentation/README.md @@ -41,16 +41,6 @@ - 软件:CUDA 11.0, cuDNN 8.0, TensorRT 8.0 - 测试配置:batch_size: 40, max_seq_len: 128 -- PP-HumanSeg-Lite数据集 - - - 数据集:AISegment + PP-HumanSeg14K + 内部自建数据集。其中 AISegment 是开源数据集,可从[链接](https://github.com/aisegmentcn/matting_human_datasets)处获取;PP-HumanSeg14K 是 PaddleSeg 自建数据集,可从[官方渠道](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.5/contrib/PP-HumanSeg/paper.md#pp-humanseg14k-a-large-scale-teleconferencing-video-dataset)获取;内部数据集不对外公开。 - - 示例数据集: 用于快速跑通人像分割的压缩和推理流程, 不能用该数据集复现 benckmark 表中的压缩效果。 [下载链接](https://paddleseg.bj.bcebos.com/humanseg/data/mini_supervisely.zip) - -- PP-Liteseg,HRNet,UNet,Deeplabv3-ResNet50数据集 - - - cityscapes: 请从[cityscapes官网](https://www.cityscapes-dataset.com/login/)下载完整数据 - - 示例数据集: cityscapes数据集的一个子集,用于快速跑通压缩和推理流程,不能用该数据集复现 benchmark 
表中的压缩效果。[下载链接](https://bj.bcebos.com/v1/paddle-slim-models/data/mini_cityscapes/mini_cityscapes.tar) - 下面将以开源数据集为例介绍如何对PP-HumanSeg-Lite进行自动压缩。 ## 3. 自动压缩流程 @@ -86,14 +76,25 @@ pip install paddleseg 开发者可下载开源数据集 (如[AISegment](https://github.com/aisegmentcn/matting_human_datasets)) 或自定义语义分割数据集。请参考[PaddleSeg数据准备文档](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.5/docs/data/marker/marker_cn.md)来检查、对齐数据格式即可。 -可以通过以下命令下载人像分割示例数据: +本示例使用示例开源数据集 AISegment 数据集为例介绍如何对PP-HumanSeg-Lite进行自动压缩。示例中的数据集仅用于快速跑通自动压缩流程,并不能复现出 benchmark 表中的压缩效果。 +可以通过以下命令下载人像分割示例数据: ```shell -cd ./data -python download_data.py mini_humanseg - +python ./data/download_data.py mini_humanseg +### 下载后的数据位置为 ./data/humanseg/ ``` +**提示:** +- PP-HumanSeg-Lite压缩过程使用的数据集 + + - 数据集:AISegment + PP-HumanSeg14K + 内部自建数据集。其中 AISegment 是开源数据集,可从[链接](https://github.com/aisegmentcn/matting_human_datasets)处获取;PP-HumanSeg14K 是 PaddleSeg 自建数据集,可从[官方渠道](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.5/contrib/PP-HumanSeg/paper.md#pp-humanseg14k-a-large-scale-teleconferencing-video-dataset)获取;内部数据集不对外公开。 + - 示例数据集: 用于快速跑通人像分割的压缩和推理流程, 不能用该数据集复现 benchmark 表中的压缩效果。 [下载链接](https://paddleseg.bj.bcebos.com/humanseg/data/mini_supervisely.zip) + +- PP-Liteseg,HRNet,UNet,Deeplabv3-ResNet50数据集 + + - cityscapes: 请从[cityscapes官网](https://www.cityscapes-dataset.com/login/)下载完整数据 + - 示例数据集: cityscapes数据集的一个子集,用于快速跑通压缩和推理流程,不能用该数据集复现 benchmark 表中的压缩效果。[下载链接](https://bj.bcebos.com/v1/paddle-slim-models/data/mini_cityscapes/mini_cityscapes.tar) + #### 3.3 准备预测模型 预测模型的格式为:`model.pdmodel` 和 `model.pdiparams`两个,带`pdmodel`的是模型文件,带`pdiparams`后缀的是权重文件。 @@ -111,77 +112,40 @@ tar -xzf ppseg_lite_portrait_398x224_with_softmax.tar.gz #### 3.4 自动压缩并产出模型 -自动压缩示例通过run.py脚本启动,会使用接口```paddleslim.auto_compression.AutoCompression```对模型进行自动压缩。首先要配置config文件中模型路径、数据集路径、蒸馏、量化、稀疏化和训练等部分的参数,配置完成后便可对模型进行非结构化稀疏、蒸馏和量化、蒸馏。 +自动压缩示例通过run.py脚本启动,会使用接口 ```paddleslim.auto_compression.AutoCompression``` 
对模型进行自动压缩。首先要配置config文件中模型路径、数据集路径、蒸馏、量化、稀疏化和训练等部分的参数,配置完成后便可对模型进行非结构化稀疏、蒸馏和量化、蒸馏。 -当只设置训练参数,并传入``deploy_hardware``字段时,将自动搜索压缩策略进行压缩。以骁龙710(SD710)为部署硬件,进行自动压缩的运行命令如下: +当只设置训练参数,并在config文件中 ```Global``` 配置中传入 ```deploy_hardware``` 字段时,将自动搜索压缩策略进行压缩。以骁龙710(SD710)为部署硬件,进行自动压缩的运行命令如下: ```shell # 单卡启动 export CUDA_VISIBLE_DEVICES=0 -python run.py \ - --model_dir='./ppseg_lite_portrait_398x224_with_softmax' \ - --model_filename='model.pdmodel' \ - --params_filename='model.pdiparams' \ - --save_dir='./save_model' \ - --strategy_config='configs/pp_humanseg/pp_humanseg_auto.yaml' \ - --dataset_config='configs/dataset/humanseg_dataset.yaml' \ - --deploy_hardware='SD710' +python run.py --config_path='./configs/pp_humanseg/pp_humanseg_auto.yaml' --save_dir='./save_compressed_model' # 多卡启动 export CUDA_VISIBLE_DEVICES=0,1 -python -m paddle.distributed.launch run.py \ - --model_dir='./ppseg_lite_portrait_398x224_with_softmax' \ - --model_filename='model.pdmodel' \ - --params_filename='model.pdiparams' \ - --save_dir='./save_model' \ - --strategy_config='configs/pp_humanseg/pp_humanseg_auto.yaml' \ - --dataset_config='configs/dataset/humanseg_dataset.yaml' \ - --deploy_hardware='SD710' +python -m paddle.distributed.launch run.py --config_path='./configs/pp_humanseg/pp_humanseg_auto.yaml' --save_dir='./save_compressed_model' ``` + - 自行配置稀疏参数进行非结构化稀疏和蒸馏训练,配置参数含义详见[自动压缩超参文档](https://github.com/PaddlePaddle/PaddleSlim/blob/27dafe1c722476f1b16879f7045e9215b6f37559/demo/auto_compression/hyperparameter_tutorial.md)。具体命令如下所示: ```shell # 单卡启动 export CUDA_VISIBLE_DEVICES=0 -python run.py \ - --model_dir='./ppseg_lite_portrait_398x224_with_softmax' \ - --model_filename='model.pdmodel' \ - --params_filename='model.pdiparams' \ - --save_dir='./save_model' \ - --strategy_config='configs/pp_humanseg/pp_humanseg_sparse.yaml' \ - --dataset_config='configs/dataset/humanseg_dataset.yaml' +python run.py --config_path='./configs/pp_humanseg/pp_humanseg_sparse.yaml' --save_dir='./save_sparse_model' # 
多卡启动 export CUDA_VISIBLE_DEVICES=0,1 -python -m paddle.distributed.launch run.py \ - --model_dir='./ppseg_lite_portrait_398x224_with_softmax' \ - --model_filename='model.pdmodel' \ - --params_filename='model.pdiparams' \ - --save_dir='./save_model' \ - --strategy_config='configs/pp_humanseg/pp_humanseg_sparse.yaml' \ - --dataset_config='configs/dataset/humanseg_dataset.yaml' +python -m paddle.distributed.launch run.py --config_path='./configs/pp_humanseg/pp_humanseg_sparse.yaml' --save_dir='./save_sparse_model' ``` - 自行配置量化参数进行量化和蒸馏训练,配置参数含义详见[自动压缩超参文档](https://github.com/PaddlePaddle/PaddleSlim/blob/27dafe1c722476f1b16879f7045e9215b6f37559/demo/auto_compression/hyperparameter_tutorial.md)。具体命令如下所示: ```shell # 单卡启动 export CUDA_VISIBLE_DEVICES=0 -python run.py \ - --model_dir='./ppseg_lite_portrait_398x224_with_softmax' \ - --model_filename='model.pdmodel' \ - --params_filename='model.pdiparams' \ - --save_dir='./save_model' \ - --strategy_config='configs/pp_humanseg/pp_humanseg_qat.yaml' \ - --dataset_config='configs/dataset/humanseg_dataset.yaml' +python run.py --config_path='./configs/pp_humanseg/pp_humanseg_qat.yaml' --save_dir='./save_quant_model' # 多卡启动 export CUDA_VISIBLE_DEVICES=0,1 -python -m paddle.distributed.launch run.py \ - --model_dir='./ppseg_lite_portrait_398x224_with_softmax' \ - --model_filename='model.pdmodel' \ - --params_filename='model.pdiparams' \ - --save_dir='./save_model' \ - --strategy_config='configs/pp_humanseg/pp_humanseg_qat.yaml' \ - --dataset_config='configs/dataset/humanseg_dataset.yaml' +python -m paddle.distributed.launch run.py --config_path='./configs/pp_humanseg/pp_humanseg_qat.yaml' --save_dir='./save_quant_model' ``` 压缩完成后会在`save_dir`中产出压缩好的预测模型,可直接预测部署。 diff --git a/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml b/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml index 0d718cc6..035c2890 100644 --- 
a/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml +++ b/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml @@ -1,5 +1,8 @@ Global: reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml + model_dir: ./RES-paddle2-Deeplabv3-ResNet50 + model_filename: model + params_filename: params Distillation: alpha: 1.0 diff --git a/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_qat.yaml b/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_qat.yaml index b3d22929..36d3fde4 100644 --- a/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_qat.yaml +++ b/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_qat.yaml @@ -1,5 +1,8 @@ Global: reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml + model_dir: ./RES-paddle2-HRNetW18-Seg + model_filename: model + params_filename: params Distillation: alpha: 1.0 diff --git a/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_sparse.yaml b/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_sparse.yaml index 6091d889..922589c3 100644 --- a/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_sparse.yaml +++ b/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_sparse.yaml @@ -1,5 +1,8 @@ Global: reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml + model_dir: ./RES-paddle2-HRNetW18-Seg + model_filename: model + params_filename: params Distillation: alpha: 1.0 diff --git a/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_auto.yaml b/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_auto.yaml index 8adde821..d9f31251 100644 --- a/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_auto.yaml +++ b/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_auto.yaml @@ -1,3 +1,10 @@ +Global: + reader_config: 
configs/dataset/humanseg_dataset.yaml + model_dir: ./ppseg_lite_portrait_398x224_with_softmax + model_filename: model.pdmodel + params_filename: model.pdiparams + deploy_hardware: SD710 + TrainConfig: epochs: 14 eval_iter: 400 diff --git a/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_qat.yaml b/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_qat.yaml index d11af163..8a917f98 100644 --- a/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_qat.yaml +++ b/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_qat.yaml @@ -1,3 +1,9 @@ +Global: + reader_config: configs/dataset/humanseg_dataset.yaml + model_dir: ./ppseg_lite_portrait_398x224_with_softmax + model_filename: model.pdmodel + params_filename: model.pdiparams + Distillation: alpha: 1.0 loss: l2 diff --git a/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_sparse.yaml b/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_sparse.yaml index 5e71f396..1cb7adb1 100644 --- a/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_sparse.yaml +++ b/example/auto_compression/semantic_segmentation/configs/pp_humanseg/pp_humanseg_sparse.yaml @@ -1,3 +1,9 @@ +Global: + reader_config: configs/dataset/humanseg_dataset.yaml + model_dir: ./ppseg_lite_portrait_398x224_with_softmax + model_filename: model.pdmodel + params_filename: model.pdiparams + Distillation: alpha: 1.0 loss: l2 diff --git a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_auto.yaml b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_auto.yaml index 42ff6c5d..003078aa 100644 --- a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_auto.yaml +++ b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_auto.yaml @@ -1,3 +1,9 @@ +Global: + reader_config: 
configs/dataset/cityscapes_1024x512_scale1.0.yml + model_dir: ./RES-paddle2-PPLIteSegSTDC1 + model_filename: model + params_filename: params + TrainConfig: epochs: 14 eval_iter: 90 diff --git a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_qat.yaml b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_qat.yaml index 6bb5b42d..67ee9d69 100644 --- a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_qat.yaml +++ b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_qat.yaml @@ -1,3 +1,9 @@ +Global: + reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml + model_dir: ./RES-paddle2-PPLIteSegSTDC1 + model_filename: model + params_filename: params + Distillation: alpha: 1.0 loss: l2 diff --git a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_sparse.yaml b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_sparse.yaml index f3a6d958..52f256da 100644 --- a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_sparse.yaml +++ b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_sparse.yaml @@ -1,3 +1,9 @@ +Global: + reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml + model_dir: ./RES-paddle2-PPLIteSegSTDC1 + model_filename: model + params_filename: params + Distillation: alpha: 1.0 loss: l2 diff --git a/example/auto_compression/semantic_segmentation/configs/unet/unet_channel_prune.yaml b/example/auto_compression/semantic_segmentation/configs/unet/unet_channel_prune.yaml index 45716fa8..920c3b4d 100644 --- a/example/auto_compression/semantic_segmentation/configs/unet/unet_channel_prune.yaml +++ b/example/auto_compression/semantic_segmentation/configs/unet/unet_channel_prune.yaml @@ -1,5 +1,8 @@ Global: reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml + model_dir: ./RES-paddle2-UNet + model_filename: model + params_filename: 
params Distillation: alpha: 1.0 diff --git a/example/auto_compression/semantic_segmentation/configs/unet/unet_qat.yaml b/example/auto_compression/semantic_segmentation/configs/unet/unet_qat.yaml index 02009ebf..f686f41f 100644 --- a/example/auto_compression/semantic_segmentation/configs/unet/unet_qat.yaml +++ b/example/auto_compression/semantic_segmentation/configs/unet/unet_qat.yaml @@ -1,5 +1,8 @@ Global: reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml + model_dir: ./RES-paddle2-UNet + model_filename: model + params_filename: params Distillation: alpha: 1.0 diff --git a/example/auto_compression/semantic_segmentation/run.py b/example/auto_compression/semantic_segmentation/run.py index 2cda35b5..fbb3989c 100644 --- a/example/auto_compression/semantic_segmentation/run.py +++ b/example/auto_compression/semantic_segmentation/run.py @@ -21,48 +21,24 @@ from paddleseg.cvlibs import Config as PaddleSegDataConfig from paddleseg.utils import worker_init_fn from paddleslim.auto_compression import AutoCompression +from paddleslim.auto_compression.config_helpers import load_config as load_slim_config from paddleseg.core.infer import reverse_transform from paddleseg.utils import metrics -def parse_args(): - parser = argparse.ArgumentParser(description='Model training') +def argsparser(): + parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( - '--model_dir', + '--config_path', type=str, default=None, - help="inference model directory.") - parser.add_argument( - '--model_filename', - type=str, - default=None, - help="inference model filename.") - parser.add_argument( - '--params_filename', - type=str, - default=None, - help="inference params filename.") + help="path of compression strategy config.") parser.add_argument( '--save_dir', type=str, default=None, help="directory to save compressed model.") - parser.add_argument( - '--strategy_config', - type=str, - default=None, - help="path of compression strategy config.") - 
parser.add_argument( - '--dataset_config', - type=str, - default=None, - help="path of dataset config.") - parser.add_argument( - '--deploy_hardware', - type=str, - default=None, - help="The hardware you want to deploy.") - return parser.parse_args() + return parser def eval_function(exe, compiled_test_program, test_feed_names, test_fetch_list): @@ -141,13 +117,15 @@ def reader_wrapper(reader): return gen -if __name__ == '__main__': +def main(args): + all_config = load_slim_config(args.config_path) + assert "Global" in all_config, f"Key 'Global' not found in config file. \n{all_config}" + config = all_config["Global"] + rank_id = paddle.distributed.get_rank() place = paddle.CUDAPlace(rank_id) - args = parse_args() - paddle.enable_static() # step1: load dataset config and create dataloader - data_cfg = PaddleSegDataConfig(args.dataset_config) + data_cfg = PaddleSegDataConfig(config['reader_config']) train_dataset = data_cfg.train_dataset eval_dataset = data_cfg.val_dataset batch_sampler = paddle.io.DistributedBatchSampler( @@ -166,19 +144,24 @@ if __name__ == '__main__': nranks = paddle.distributed.get_world_size() rank_id = paddle.distributed.get_rank() - if nranks > 1 and rank_id != 0: - eval_function = None # step2: create and instance of AutoCompression ac = AutoCompression( - model_dir=args.model_dir, - model_filename=args.model_filename, - params_filename=args.params_filename, + model_dir=config['model_dir'], + model_filename=config['model_filename'], + params_filename=config['params_filename'], save_dir=args.save_dir, - config=args.strategy_config, + config=all_config, train_dataloader=train_dataloader, - eval_callback=eval_function, - deploy_hardware=args.deploy_hardware) + eval_callback=None if nranks > 1 and rank_id != 0 else eval_function, + deploy_hardware=config.get('deploy_hardware') or None) # step3: start the compression job ac.compress() + + +if __name__ == '__main__': + paddle.enable_static() + parser = argsparser() + args = parser.parse_args() + 
main(args) diff --git a/paddleslim/auto_compression/compressor.py b/paddleslim/auto_compression/compressor.py index 144290e7..321043a3 100644 --- a/paddleslim/auto_compression/compressor.py +++ b/paddleslim/auto_compression/compressor.py @@ -465,10 +465,10 @@ class AutoCompression: 'train_config must has `epochs` or `train_iter` field.') config_dict['gmp_config'] = { 'stable_iterations': 0, - 'pruning_iterations': 0.45 * total_iters, - 'tunning_iterations': 0.45 * total_iters, + 'pruning_iterations': max(0.45 * total_iters, 30), + 'tunning_iterations': max(0.45 * total_iters, 30), 'resume_iteration': -1, - 'pruning_steps': 100, + 'pruning_steps': 100 if (0.45 * total_iters) > 1000 else 1, 'initial_ratio': 0.15, } ### add prune program -- GitLab