未验证 提交 b18ed451 编写于 作者: Z zhouzj 提交者: GitHub

add BiSeNetV2. (#1470)

上级 9dbdada8
Global:
reader_config: ./configs/picodet_reader.yml
input_list: ['image', 'scale_factor']
Evaluation: True
model_dir: ./picodet_s_416_coco_lcnet/
model_filename: model.pdmodel
......
Global:
reader_config: configs/yolo_reader.yml
input_list: ['image', 'scale_factor']
arch: YOLO
Evaluation: True
model_dir: ./ppyoloe_crn_l_300e_coco
......
Global:
reader_config: configs/yolo_reader.yml
input_list: ['image']
arch: PPYOLOE # When export exclude_nms=True, need set arch: PPYOLOE
Evaluation: True
model_dir: ./ppyoloe_crn_s_300e_coco
......
Global:
reader_config: configs/ssd_reader.yml
input_list: ['image', 'scale_factor', 'im_shape']
Evaluation: True
model_dir: ./ssd_mobilenet_v1_300_120e_voc # Model Link: https://bj.bcebos.com/v1/paddle-slim-models/act/ssd_mobilenet_v1_300_120e_voc.tar
model_filename: model.pdmodel
......
Global:
arch: 'keypoint'
reader_config: configs/tinypose_reader.yml
input_list: ['image']
Evaluation: True
model_dir: ./tinypose_128x96
model_filename: model.pdmodel
......
Global:
reader_config: configs/yolo_reader.yml
input_list: ['image', 'im_shape', 'scale_factor']
Evaluation: True
model_dir: ./yolov3_mobilenet_v1_270e_coco # Model Link: https://paddledet.bj.bcebos.com/models/slim/yolov3_mobilenet_v1_270e_coco.tar
model_filename: model.pdmodel
......
......@@ -24,6 +24,7 @@ from paddleslim.common import load_config as load_slim_config
from paddleslim.auto_compression import AutoCompression
from keypoint_utils import keypoint_post_process
from post_process import PPYOLOEPostProcess
from paddleslim.common.dataloader import get_feed_vars
def argsparser():
......@@ -134,6 +135,9 @@ def main():
train_loader = create('EvalReader')(reader_cfg['TrainDataset'],
reader_cfg['worker_num'],
return_list=True)
global_config['input_list'] = get_feed_vars(
global_config['model_dir'], global_config['model_filename'],
global_config['params_filename'])
train_loader = reader_wrapper(train_loader, global_config['input_list'])
if 'Evaluation' in global_config.keys() and global_config[
......
Global:
input_name: x
model_dir: EfficientNetB0_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: x
model_dir: EfficientNetB0_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: GhostNet_x1_0_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: GhostNet_x1_0_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: x
model_dir: InceptionV3_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: x
model_dir: save_quant_inception
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: MobileNetV1_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: MobileNetV1_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: MobileNetV3_large_x1_0_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: MobileNetV3_large_x1_0_ssld_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: x
model_dir: PPHGNet_tiny_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: x
model_dir: PPHGNet_tiny_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: x
model_dir: PPLCNetV2_base_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: x
model_dir: PPLCNetV2_base_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: x
model_dir: PPLCNet_x1_0_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: x
model_dir: PPLCNet_x1_0_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: ResNet50_vd_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: ResNet50_vd_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: ShuffleNetV2_x1_0_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: ShuffleNetV2_x1_0_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: SqueezeNet1_0_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: SqueezeNet1_0_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
Global:
input_name: inputs
model_dir: SwinTransformer_base_patch4_window7_224_infer
model_filename: inference.pdmodel
params_filename: inference.pdiparams
......
......@@ -27,6 +27,7 @@ from paddle.io import DataLoader
from imagenet_reader import ImageNetDataset
from paddleslim.common import load_config as load_slim_config
from paddleslim.auto_compression import AutoCompression
from paddleslim.common.dataloader import get_feed_vars
def argsparser():
......@@ -57,6 +58,9 @@ def argsparser():
# yapf: enable
def reader_wrapper(reader, input_name):
if isinstance(input_name, list) and len(input_name) == 1:
input_name = input_name[0]
def gen():
for i, (imgs, label) in enumerate(reader()):
yield {input_name: imgs}
......@@ -175,6 +179,9 @@ def main():
shuffle=True,
drop_last=True,
num_workers=0)
global_config['input_name'] = get_feed_vars(
global_config['model_dir'], global_config['model_filename'],
global_config['params_filename'])
train_dataloader = reader_wrapper(train_loader, global_config['input_name'])
ac = AutoCompression(
......
......@@ -4,7 +4,6 @@ Global:
model_filename: inference.pdmodel
params_filename: inference.pdiparams
algorithm: DB
input_name: 'x'
Distillation:
alpha: 1.0
......
......@@ -28,7 +28,7 @@ from ppocr.losses import build_loss
from ppocr.optimizer import build_optimizer
from ppocr.postprocess import build_post_process
from ppocr.metrics import build_metric
from paddleslim.common.dataloader import get_feed_vars
logger = get_logger(__name__, level=logging.INFO)
......@@ -54,6 +54,9 @@ def argsparser():
def reader_wrapper(reader, input_name):
if isinstance(input_name, list) and len(input_name) == 1:
input_name = input_name[0]
def gen():
for i, batch in enumerate(reader()):
yield {input_name: batch[0]}
......@@ -131,6 +134,9 @@ def main():
all_config['TrainConfig']['learning_rate']['T_max'] = steps
print('total training steps:', steps)
global_config['input_name'] = get_feed_vars(
global_config['model_dir'], global_config['model_filename'],
global_config['params_filename'])
ac = AutoCompression(
model_dir=global_config['model_dir'],
model_filename=global_config['model_filename'],
......
......@@ -30,6 +30,8 @@
| UNet | 量化训练 | 64.93 | - |10.228| [config](./configs/unet/unet_qat.yaml) | [model](https://bj.bcebos.com/v1/paddle-slim-models/act/PaddleSeg/qat/unet_qat.tar) |
| Deeplabv3-ResNet50 | Baseline | 79.90 | -|12.766| -| [model](https://paddleseg.bj.bcebos.com/tipc/easyedge/RES-paddle2-Deeplabv3-ResNet50.zip)|
| Deeplabv3-ResNet50 | 量化训练 | 79.26 | - |8.839|[config](./configs/deeplabv3/deeplabv3_qat.yaml) | [model](https://bj.bcebos.com/v1/paddle-slim-models/act/PaddleSeg/qat/deeplabv3_qat.tar) |
| BiSeNetV2 | Baseline | 73.17 | -|35.61| -| [model](https://bj.bcebos.com/v1/paddle-slim-models/act/PaddleSeg/BiSeNetV2.tar)|
| BiSeNetV2 | 量化训练 | 73.20 | - |15.94|[config](./configs/BiSeNetV2/BiSeNetV2_qat.yaml) | [model](https://bj.bcebos.com/v1/paddle-slim-models/act/PaddleSeg/qat/BiSeNetV2_qat.tar) |
- ARM CPU测试环境:`高通骁龙710处理器(SDM710 2*A75(2.2GHz) 6*A55(1.7GHz))`
......@@ -47,7 +49,7 @@
- PaddlePaddle >= 2.3 (可从[Paddle官网](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html)下载安装)
- PaddleSlim >= 2.3
- PaddleSeg >= 2.5
- PaddleSeg == 2.5.0
安装paddlepaddle:
```shell
......@@ -67,13 +69,13 @@ pip install paddleslim
git clone https://github.com/PaddlePaddle/PaddleSlim.git
```
安装paddleseg
安装paddleseg 2.5.0
```shell
pip install paddleseg==2.5.0
```
注:安装[PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg)的目的只是为了直接使用PaddleSeg中的Dataloader组件,不涉及模型组网等。推荐安装PaddleSeg 2.5.0, 不同版本的PaddleSeg的Dataloader返回数据的格式略有不同.
注:安装[PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg)的目的只是为了直接使用PaddleSeg中的Dataloader组件,不涉及模型组网等。本示例需安装**PaddleSeg 2.5.0**, 不同版本的PaddleSeg的Dataloader返回数据的格式略有不同.
#### 3.2 准备数据集
......
Global:
reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
model_dir: ./BiSeNetV2
model_filename: model.pdmodel
params_filename: model.pdiparams
batch_size: 4
Distillation:
alpha: 1.0
loss: l2
node:
- conv2d_103.tmp_1
Quantization:
onnx_format: True
quantize_op_types:
- conv2d
- depthwise_conv2d
TrainConfig:
epochs: 5
eval_iter: 360
logging_iter: 100
learning_rate:
type: PiecewiseDecay
boundaries: [2]
values: [0.00001, 0.000001]
optimizer_builder:
optimizer:
type: SGD
weight_decay: 0.0005
\ No newline at end of file
Global:
reader_config: configs/deeplabv3/deeplabv3_reader.yml
reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
model_dir: ./RES-paddle2-Deeplabv3-ResNet50
model_filename: model
params_filename: params
batch_size: 4
Distillation:
alpha: 1.0
......
batch_size: 4
train_dataset:
type: Cityscapes
dataset_root: data/cityscapes
transforms:
- type: ResizeStepScaling
min_scale_factor: 0.5
max_scale_factor: 2.0
scale_step_size: 0.25
- type: RandomPaddingCrop
crop_size: [1024, 512]
- type: RandomHorizontalFlip
- type: RandomDistort
brightness_range: 0.5
contrast_range: 0.5
saturation_range: 0.5
- type: Normalize
mode: train
val_dataset:
type: Cityscapes
dataset_root: data/cityscapes
transforms:
- type: Normalize
mode: val
......@@ -19,6 +19,7 @@ TrainConfig:
epochs: 20
eval_iter: 360
learning_rate: 0.0001
optimizer: SGD
optim_args:
weight_decay: 4.0e-05
optimizer_builder:
optimizer:
type: SGD
weight_decay: 4.0e-05
......@@ -18,12 +18,12 @@ import random
import paddle
import numpy as np
from paddleseg.cvlibs import Config as PaddleSegDataConfig
from paddleseg.utils import worker_init_fn
from paddleseg.utils import worker_init_fn, metrics
from paddleseg.core.infer import reverse_transform
from paddleslim.auto_compression import AutoCompression
from paddleslim.common import load_config as load_slim_config
from paddleseg.core.infer import reverse_transform
from paddleseg.utils import metrics
from paddleslim.common.dataloader import get_feed_vars
def argsparser():
......@@ -63,7 +63,7 @@ def eval_function(exe, compiled_test_program, test_feed_names, test_fetch_list):
print("Start evaluating (total_samples: {}, total_iters: {})...".format(
len(eval_dataset), total_iters))
for iter, (image, label) in enumerate(loader):
for iters, (image, label) in enumerate(loader):
paddle.enable_static()
label = np.array(label).astype('int64')
......@@ -97,6 +97,8 @@ def eval_function(exe, compiled_test_program, test_feed_names, test_fetch_list):
intersect_area_all = intersect_area_all + intersect_area
pred_area_all = pred_area_all + pred_area
label_area_all = label_area_all + label_area
if iters % 100 == 0:
print("Eval iter:", iters)
class_iou, miou = metrics.mean_iou(intersect_area_all, pred_area_all,
label_area_all)
......@@ -113,11 +115,14 @@ def eval_function(exe, compiled_test_program, test_feed_names, test_fetch_list):
return miou
def reader_wrapper(reader, input_name):
    """Wrap a PaddleSeg dataloader so it yields feed dicts keyed by the
    model's input variable name.

    Args:
        reader: a callable (e.g. a paddle.io.DataLoader) returning an
            iterable of batches where element 0 is the image tensor.
        input_name: the feed variable name (str), or a single-element
            list containing it (the form returned by get_feed_vars).

    Returns:
        A zero-argument generator function; each yielded item is a dict
        mapping the feed name to the image batch as a numpy array.
    """
    # get_feed_vars returns a list of feed names; unwrap the common
    # single-input case so it can be used directly as a dict key.
    # NOTE(review): a multi-element list would be used as a dict key and
    # fail — presumably segmentation models here have one input; confirm.
    if isinstance(input_name, list) and len(input_name) == 1:
        input_name = input_name[0]

    def gen():
        for data in reader():
            imgs = np.array(data[0])
            yield {input_name: imgs}

    return gen
......@@ -139,9 +144,10 @@ def main(args):
train_dataset = data_cfg.train_dataset
global eval_dataset
eval_dataset = data_cfg.val_dataset
batch_size = config.get('batch_size')
batch_sampler = paddle.io.DistributedBatchSampler(
train_dataset,
batch_size=data_cfg.batch_size,
batch_size=batch_size if batch_size else data_cfg.batch_size,
shuffle=True,
drop_last=True)
train_loader = paddle.io.DataLoader(
......@@ -151,7 +157,10 @@ def main(args):
num_workers=0,
return_list=True,
worker_init_fn=worker_init_fn)
train_dataloader = reader_wrapper(train_loader)
input_name = get_feed_vars(config['model_dir'], config['model_filename'],
config['params_filename'])
train_dataloader = reader_wrapper(train_loader, input_name)
nranks = paddle.distributed.get_world_size()
rank_id = paddle.distributed.get_rank()
......
......@@ -5,6 +5,7 @@ from ...analysis import TableLatencyPredictor
from .prune_model import get_sparse_model, get_prune_model
from .fake_ptq import post_quant_fake
from ...common.load_model import load_inference_model
import platform
def with_variable_shape(model_dir, model_filename=None, params_filename=None):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册