# Step 1: prune at epoch 0
# Step 2: fine-tune from epoch 0 to epoch 120
# Step 3: quantization training from epoch 121 to epoch 141
version: 1.0
pruners:
    pruner_1:
        class: 'StructurePruner'
        pruning_axis:
            '*': 0
        criterions:
            '*': 'l1_norm'
strategies:
    uniform_pruning_strategy:
        class: 'UniformPruneStrategy'
        pruner: 'pruner_1'
        start_epoch: 0
        target_ratio: 0.5
        pruned_params: '.*_sep_weights'
        metric_name: 'acc_top1'
    quantization_strategy:
        class: 'QuantizationStrategy'
        start_epoch: 121
        end_epoch: 141
        float_model_save_path: './output/float'
        mobile_model_save_path: './output/mobile'
        int8_model_save_path: './output/int8'
        weight_bits: 8
        activation_bits: 8
        weight_quantize_type: 'abs_max'
        activation_quantize_type: 'abs_max'
compressor:
    epoch: 142
    # init_model: './checkpoints/0'  # Uncomment this option to resume training from a checkpoint.
    checkpoint_path: './checkpoints/'
    strategies:
        - uniform_pruning_strategy
        - quantization_strategy
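
# The schedule above runs pruning, fine-tuning, and quantization training in one
# job: the compressor trains for 142 epochs in total, the pruning strategy fires
# at epoch 0, and quantization training covers epochs 121 through 141.
#
# Minimal usage sketch, kept as comments so this file stays valid YAML. It
# assumes the legacy paddle.fluid.contrib.slim Compressor API and that this
# file is saved as './compress.yaml'; the place, readers, and optimizer names
# are placeholders, not defined by this config:
#
#   from paddle.fluid.contrib.slim.core import Compressor
#
#   com_pass = Compressor(place, fluid.global_scope(),
#                         fluid.default_main_program(),
#                         train_reader=train_reader,
#                         eval_reader=eval_reader,
#                         train_optimizer=optimizer)
#   com_pass.config('./compress.yaml')  # load this configuration file
#   com_pass.run()                      # run pruning, fine-tuning, and quantization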