Unverified commit ea11d456, authored by whs and committed by GitHub

Add demo for combined strategies. (#1954)

* Add demo for combined strategies.

* Fix quan.

* Restore args in run.sh
Parent 2a73d909
configs/quantization.yaml (see the schedule check after these hunks):
@@ -3,7 +3,7 @@ strategies:
quantization_strategy:
class: 'QuantizationStrategy'
start_epoch: 0
- end_epoch: 0
+ end_epoch: 20
float_model_save_path: './output/float'
mobile_model_save_path: './output/mobile'
int8_model_save_path: './output/int8'
@@ -14,7 +14,7 @@ strategies:
save_in_nodes: ['image']
save_out_nodes: ['fc_0.tmp_2']
compressor:
- epoch: 6
+ epoch: 21
checkpoint_path: './checkpoints_quan/'
strategies:
- quantization_strategy
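
The two hunks above extend quantization training in configs/quantization.yaml from a single epoch to epochs 0 through 20 and raise the compressor's total epoch count from 6 to 21, so the strategy's end_epoch now falls on the last training epoch. Below is a minimal sketch, not part of the demo, that checks this relationship with PyYAML, assuming the compressor counts epochs 0 through epoch-1; the embedded YAML contains only values visible in the diff.

# Minimal sketch (not part of the demo): sanity-check the epoch schedule
# implied by the updated configs/quantization.yaml.
import yaml

cfg_text = """
strategies:
    quantization_strategy:
        class: 'QuantizationStrategy'
        start_epoch: 0
        end_epoch: 20
compressor:
    epoch: 21
    strategies:
        - quantization_strategy
"""

cfg = yaml.safe_load(cfg_text)
quant = cfg["strategies"]["quantization_strategy"]
total_epochs = cfg["compressor"]["epoch"]

# Assuming the compressor trains epochs 0 .. epoch-1, end_epoch must fall
# inside that range for the quantization pass to run to completion.
assert quant["start_epoch"] <= quant["end_epoch"] < total_epochs
print("quantization runs epochs %d-%d of %d total epochs"
      % (quant["start_epoch"], quant["end_epoch"], total_epochs))
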
run.sh:
@@ -25,7 +25,7 @@ fi
cd -
# for distillation
- #--------------------
+ #-----------------
export CUDA_VISIBLE_DEVICES=0
python compress.py \
--model "MobileNet" \
@@ -35,7 +35,7 @@ python compress.py \
# for sensitivity filter pruning
- #---------------------------
+ #-------------------------------
#export CUDA_VISIBLE_DEVICES=0
#python compress.py \
#--model "MobileNet" \
@@ -51,7 +51,7 @@ python compress.py \
#--compress_config ./configs/filter_pruning_uniform.yaml
# for quantization
- #---------------------------
+ #-----------------
#export CUDA_VISIBLE_DEVICES=0
#python compress.py \
#--batch_size 64 \
@@ -59,3 +59,21 @@ python compress.py \
#--pretrained_model ./pretrain/MobileNetV1_pretrained \
#--compress_config ./configs/quantization.yaml
+ # for distillation with quantization
+ #-----------------------------------
+ #export CUDA_VISIBLE_DEVICES=0
+ #python compress.py \
+ #--model "MobileNet" \
+ #--teacher_model "ResNet50" \
+ #--teacher_pretrained_model ./data/pretrain/ResNet50_pretrained \
+ #--compress_config ./configs/quantization_dist.yaml
+ # for uniform filter pruning with quantization
+ #---------------------------------------------
+ #export CUDA_VISIBLE_DEVICES=0
+ #python compress.py \
+ #--model "MobileNet" \
+ #--pretrained_model ./data/pretrain/MobileNetV1_pretrained \
+ #--compress_config ./configs/quantization_pruning.yaml
configs/quantization_dist.yaml (new file; see the schedule sketch after this config):
# Step1: distillation training from epoch-0 to epoch-120
# Step2: quantization training from epoch-121 to epoch-141
version: 1.0
distillers:
fsp_distiller:
class: 'FSPDistiller'
teacher_pairs: [['res2a_branch2a.conv2d.output.1.tmp_0', 'res3a_branch2a.conv2d.output.1.tmp_0']]
student_pairs: [['depthwise_conv2d_1.tmp_0', 'conv2d_3.tmp_0']]
distillation_loss_weight: 1
l2_distiller:
class: 'L2Distiller'
teacher_feature_map: 'fc_1.tmp_0'
student_feature_map: 'fc_0.tmp_0'
distillation_loss_weight: 1
strategies:
distillation_strategy:
class: 'DistillationStrategy'
distillers: ['fsp_distiller', 'l2_distiller']
start_epoch: 0
end_epoch: 120
quantization_strategy:
class: 'QuantizationStrategy'
start_epoch: 121
end_epoch: 141
float_model_save_path: './output/float'
mobile_model_save_path: './output/mobile'
int8_model_save_path: './output/int8'
weight_bits: 8
activation_bits: 8
weight_quantize_type: 'abs_max'
activation_quantize_type: 'abs_max'
compressor:
epoch: 142
checkpoint_path: './checkpoints/'
strategies:
- distillation_strategy
- quantization_strategy
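
The header comments of this new config describe the combined run: distillation (FSP plus L2) for epochs 0-120, then quantization for epochs 121-141, with the compressor training 142 epochs in total. The snippet below is a rough illustration of that schedule, not the demo's code, assuming each strategy is active for epochs start_epoch through end_epoch inclusive.

# Rough illustration (not the demo's code) of the two-phase schedule in
# configs/quantization_dist.yaml.
SCHEDULE = {
    "distillation_strategy": (0, 120),    # FSPDistiller + L2Distiller
    "quantization_strategy": (121, 141),  # abs_max weight/activation quantization
}
TOTAL_EPOCHS = 142  # compressor: epoch


def active_strategies(epoch):
    """Strategies whose [start_epoch, end_epoch] window covers this epoch."""
    return [name for name, (start, end) in SCHEDULE.items() if start <= epoch <= end]


for epoch in range(TOTAL_EPOCHS):
    assert len(active_strategies(epoch)) <= 1  # the two phases never overlap

for epoch in (0, 120, 121, 141):
    print(epoch, active_strategies(epoch))
# 0 ['distillation_strategy']
# 120 ['distillation_strategy']
# 121 ['quantization_strategy']
# 141 ['quantization_strategy']
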
configs/quantization_pruning.yaml (new file; see the pruning-pattern sketch after this config):
# step1: Pruning at epoch-0
# step2: Fine-tune from epoch-0 to epoch-120
# step3: Quantization training from epoch-121 to epoch-141
version: 1.0
pruners:
pruner_1:
class: 'StructurePruner'
pruning_axis:
'*': 0
criterions:
'*': 'l1_norm'
strategies:
uniform_pruning_strategy:
class: 'UniformPruneStrategy'
pruner: 'pruner_1'
start_epoch: 0
target_ratio: 0.5
pruned_params: '.*_sep_weights'
metric_name: 'acc_top1'
quantization_strategy:
class: 'QuantizationStrategy'
start_epoch: 121
end_epoch: 141
float_model_save_path: './output/float'
mobile_model_save_path: './output/mobile'
int8_model_save_path: './output/int8'
weight_bits: 8
activation_bits: 8
weight_quantize_type: 'abs_max'
activation_quantize_type: 'abs_max'
compressor:
epoch: 142
checkpoint_path: './checkpoints/'
strategies:
- uniform_pruning_strategy
- quantization_strategy
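
Unlike the quantization strategy, UniformPruneStrategy here has only a start_epoch of 0, so per the step comments pruning is applied once up front and the pruned network is then fine-tuned until quantization takes over at epoch 121. The pruned_params field is a regular expression; the hypothetical check below (the MobileNetV1 parameter names are illustrative, not read from the demo, and how PaddleSlim applies the pattern internally may differ) shows that '.*_sep_weights' selects the pointwise (separable) convolution weights while leaving depthwise and FC weights untouched.

# Hypothetical check: which parameter names the pruned_params pattern selects.
import re

PRUNED_PARAMS = re.compile(r".*_sep_weights")

# Illustrative MobileNetV1 parameter names: depthwise ("_dw_") and pointwise
# ("_sep_") convolution weights plus the final FC weights.
param_names = [
    "conv2_1_dw_weights",
    "conv2_1_sep_weights",
    "conv5_6_dw_weights",
    "conv5_6_sep_weights",
    "fc7_weights",
]

selected = [name for name in param_names if PRUNED_PARAMS.match(name)]
print(selected)
# ['conv2_1_sep_weights', 'conv5_6_sep_weights']
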