diff --git a/paddleslim/quant/quanter.py b/paddleslim/quant/quanter.py
index 9b7c85668bf0afa313b8fedcd0f030c3c47b048f..9e07c03c6ee5c1bb657393bfbc175d72ebd558fc 100755
--- a/paddleslim/quant/quanter.py
+++ b/paddleslim/quant/quanter.py
@@ -453,30 +453,56 @@ def quant_post_static(
     Returns:
         None
     """
-    post_training_quantization = PostTrainingQuantization(
-        executor=executor,
-        sample_generator=sample_generator,
-        batch_generator=batch_generator,
-        data_loader=data_loader,
-        model_dir=model_dir,
-        model_filename=model_filename,
-        params_filename=params_filename,
-        batch_size=batch_size,
-        batch_nums=batch_nums,
-        scope=scope,
-        algo=algo,
-        round_type=round_type,
-        hist_percent=hist_percent,
-        bias_correction=bias_correction,
-        quantizable_op_type=quantizable_op_type,
-        is_full_quantize=is_full_quantize,
-        weight_bits=weight_bits,
-        activation_bits=activation_bits,
-        activation_quantize_type=activation_quantize_type,
-        weight_quantize_type=weight_quantize_type,
-        onnx_format=onnx_format,
-        skip_tensor_list=skip_tensor_list,
-        optimize_model=optimize_model)
+    try:
+        post_training_quantization = PostTrainingQuantization(
+            executor=executor,
+            sample_generator=sample_generator,
+            batch_generator=batch_generator,
+            data_loader=data_loader,
+            model_dir=model_dir,
+            model_filename=model_filename,
+            params_filename=params_filename,
+            batch_size=batch_size,
+            batch_nums=batch_nums,
+            scope=scope,
+            algo=algo,
+            round_type=round_type,
+            hist_percent=hist_percent,
+            bias_correction=bias_correction,
+            quantizable_op_type=quantizable_op_type,
+            is_full_quantize=is_full_quantize,
+            weight_bits=weight_bits,
+            activation_bits=activation_bits,
+            activation_quantize_type=activation_quantize_type,
+            weight_quantize_type=weight_quantize_type,
+            onnx_format=onnx_format,
+            skip_tensor_list=skip_tensor_list,  # support in Paddle >= 2.3.1
+            optimize_model=optimize_model)
+    except:
+        post_training_quantization = PostTrainingQuantization(
+            executor=executor,
+            sample_generator=sample_generator,
+            batch_generator=batch_generator,
+            data_loader=data_loader,
+            model_dir=model_dir,
+            model_filename=model_filename,
+            params_filename=params_filename,
+            batch_size=batch_size,
+            batch_nums=batch_nums,
+            scope=scope,
+            algo=algo,
+            round_type=round_type,
+            hist_percent=hist_percent,
+            bias_correction=bias_correction,
+            quantizable_op_type=quantizable_op_type,
+            is_full_quantize=is_full_quantize,
+            weight_bits=weight_bits,
+            activation_bits=activation_bits,
+            activation_quantize_type=activation_quantize_type,
+            weight_quantize_type=weight_quantize_type,
+            onnx_format=onnx_format,
+            optimize_model=optimize_model)
+
     post_training_quantization.quantize()
     post_training_quantization.save_quantized_model(
         quantize_model_path,