#start_epoch(int): The epoch to insert quantization operators. default: 0
#
#end_epoch(int): The epoch to save inference model. default: 0
#
#float_model_save_path(str): The path to save model with float weights.
#                None means the float model is not saved. default: None.
#
#mobile_model_save_path(str): The path to save model for paddle-mobile execution.
#                None means the mobile model is not saved. default: None.
#
#int8_model_save_path(str): The path to save model with int8_t weights.
#                None means the int8 model is not saved. default: None.
#
#activation_bits(int): quantization bit number for activation. default: 8.
#
#weight_bits(int): quantization bit number for weights. The bias is not quantized.
#                  default: 8.
#
#activation_quantize_type(str): quantization type for activation,
#    now supports 'abs_max', 'range_abs_max' and 'moving_average_abs_max'.
#    With 'abs_max', the quantization scale is calculated dynamically
#    at each step during both training and testing. With 'range_abs_max',
#    a static quantization scale is calculated during training and used
#    in inference.
#
#save_in_nodes(list<str>): A list of variable names used to prune the graph
#                          when saving the inference model.
#
#save_out_nodes(list<str>): A list of variable names used to prune the graph
#                           when saving the inference model.
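#
#weight_quantize_type(str): quantization type for weights,
#    e.g. 'abs_max' as used in the strategy below.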
version: 1.0
strategies:
    quantization_strategy:
        class: 'QuantizationStrategy'
        start_epoch: 0
        end_epoch: 0
        float_model_save_path: './output/float'
        mobile_model_save_path: './output/mobile'
        int8_model_save_path: './output/int8'
        weight_bits: 8
        activation_bits: 8
        weight_quantize_type: 'abs_max'
        activation_quantize_type: 'abs_max'
        save_in_nodes: ['image']
        save_out_nodes: ['quan.tmp_2']
compressor:
    epoch: 1
    checkpoint_path: './checkpoints_quan/'
    strategies:
        - quantization_strategy
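
# ----------------------------------------------------------------------
# Usage sketch (not part of the config): a file like this is consumed by
# the PaddleSlim v1 `Compressor`. The snippet below is a minimal, hedged
# example; the import path may differ across Paddle versions, and
# `place`, `train_program`, the readers, and the variables referenced in
# the feed/fetch lists are placeholders assumed to be defined by the
# caller.
#
#   import paddle.fluid as fluid
#   from paddle.fluid.contrib.slim.core import Compressor
#
#   compressor = Compressor(
#       place,                        # e.g. fluid.CUDAPlace(0)
#       fluid.global_scope(),
#       train_program,                # Program holding the training graph
#       train_reader=train_reader,    # user-defined readers (assumed)
#       train_feed_list=[('image', image.name), ('label', label.name)],
#       train_fetch_list=[('loss', avg_cost.name)],
#       eval_program=eval_program,
#       eval_reader=eval_reader,
#       eval_feed_list=[('image', image.name), ('label', label.name)],
#       eval_fetch_list=[('acc_top1', acc_top1.name)])
#   compressor.config('./compress.yaml')  # load this file
#   compressor.run()                      # epochs and checkpoints are
#                                         # driven by the `compressor`
#                                         # section above
# ----------------------------------------------------------------------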