diff --git a/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml b/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml
index 035c289022e3142f7e12e18b241d4fed3d4ca270..a007ebfed6b34112b8cbe7eba528d60c9df6fbaf 100644
--- a/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml
+++ b/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml
@@ -1,5 +1,5 @@
 Global:
-  reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
+  reader_config: configs/deeplabv3/deeplabv3_reader.yml
   model_dir: ./RES-paddle2-Deeplabv3-ResNet50
   model_filename: model
   params_filename: params
@@ -11,15 +11,16 @@ Distillation:
   - conv2d_123.tmp_1
 
 Quantization:
+  onnx_format: True
   quantize_op_types:
   - conv2d
   - depthwise_conv2d
 
 TrainConfig:
-  epochs: 10
+  epochs: 1
   eval_iter: 360
   learning_rate: 0.0001
   optimizer_builder:
     optimizer:
       type: SGD
-    weight_decay: 0.0005
+    weight_decay: 0.0005
\ No newline at end of file
diff --git a/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_reader.yml b/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_reader.yml
new file mode 100644
index 0000000000000000000000000000000000000000..498176eaeb02daae44f64022b7d051b31fdc681e
--- /dev/null
+++ b/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_reader.yml
@@ -0,0 +1,24 @@
+batch_size: 4
+train_dataset:
+  type: Cityscapes
+  dataset_root: data/cityscapes
+  transforms:
+  - type: ResizeStepScaling
+    min_scale_factor: 0.5
+    max_scale_factor: 2.0
+    scale_step_size: 0.25
+  - type: RandomPaddingCrop
+    crop_size: [1024, 512]
+  - type: RandomHorizontalFlip
+  - type: RandomDistort
+    brightness_range: 0.5
+    contrast_range: 0.5
+    saturation_range: 0.5
+  - type: Normalize
+  mode: train
+val_dataset:
+  type: Cityscapes
+  dataset_root: data/cityscapes
+  transforms:
+  - type: Normalize
+  mode: val
diff --git a/example/auto_compression/semantic_segmentation/run.py b/example/auto_compression/semantic_segmentation/run.py
index 2506fbf228650e859dd59a36d4600e1985211e83..aa1da83859a11b957515972c226da39e57a65657 100644
--- a/example/auto_compression/semantic_segmentation/run.py
+++ b/example/auto_compression/semantic_segmentation/run.py
@@ -137,6 +137,7 @@ def main(args):
     # step1: load dataset config and create dataloader
     data_cfg = PaddleSegDataConfig(config['reader_config'])
     train_dataset = data_cfg.train_dataset
+    global eval_dataset
     eval_dataset = data_cfg.val_dataset
     batch_sampler = paddle.io.DistributedBatchSampler(
         train_dataset,
@@ -163,7 +164,7 @@ def main(args):
         save_dir=args.save_dir,
         config=all_config,
         train_dataloader=train_dataloader,
-        eval_callback=eval_function if nranks > 1 and rank_id != 0 else None,
+        eval_callback=eval_function if rank_id == 0 else None,
         deploy_hardware=config.get('deploy_hardware') or None,
         input_shapes=config.get('input_shapes', None))