Unverified Commit 6a438d9c authored by Guanghua Yu, committed by GitHub

fix ptq compatibility (#1208)

* fix ptq compatibility

* touch off CI
Parent 919a9b15
@@ -453,30 +453,56 @@ def quant_post_static(
     Returns:
         None
     """
-    post_training_quantization = PostTrainingQuantization(
-        executor=executor,
-        sample_generator=sample_generator,
-        batch_generator=batch_generator,
-        data_loader=data_loader,
-        model_dir=model_dir,
-        model_filename=model_filename,
-        params_filename=params_filename,
-        batch_size=batch_size,
-        batch_nums=batch_nums,
-        scope=scope,
-        algo=algo,
-        round_type=round_type,
-        hist_percent=hist_percent,
-        bias_correction=bias_correction,
-        quantizable_op_type=quantizable_op_type,
-        is_full_quantize=is_full_quantize,
-        weight_bits=weight_bits,
-        activation_bits=activation_bits,
-        activation_quantize_type=activation_quantize_type,
-        weight_quantize_type=weight_quantize_type,
-        onnx_format=onnx_format,
-        skip_tensor_list=skip_tensor_list,
-        optimize_model=optimize_model)
+    try:
+        post_training_quantization = PostTrainingQuantization(
+            executor=executor,
+            sample_generator=sample_generator,
+            batch_generator=batch_generator,
+            data_loader=data_loader,
+            model_dir=model_dir,
+            model_filename=model_filename,
+            params_filename=params_filename,
+            batch_size=batch_size,
+            batch_nums=batch_nums,
+            scope=scope,
+            algo=algo,
+            round_type=round_type,
+            hist_percent=hist_percent,
+            bias_correction=bias_correction,
+            quantizable_op_type=quantizable_op_type,
+            is_full_quantize=is_full_quantize,
+            weight_bits=weight_bits,
+            activation_bits=activation_bits,
+            activation_quantize_type=activation_quantize_type,
+            weight_quantize_type=weight_quantize_type,
+            onnx_format=onnx_format,
+            skip_tensor_list=skip_tensor_list,  # support in Paddle >= 2.3.1
+            optimize_model=optimize_model)
+    except:
+        post_training_quantization = PostTrainingQuantization(
+            executor=executor,
+            sample_generator=sample_generator,
+            batch_generator=batch_generator,
+            data_loader=data_loader,
+            model_dir=model_dir,
+            model_filename=model_filename,
+            params_filename=params_filename,
+            batch_size=batch_size,
+            batch_nums=batch_nums,
+            scope=scope,
+            algo=algo,
+            round_type=round_type,
+            hist_percent=hist_percent,
+            bias_correction=bias_correction,
+            quantizable_op_type=quantizable_op_type,
+            is_full_quantize=is_full_quantize,
+            weight_bits=weight_bits,
+            activation_bits=activation_bits,
+            activation_quantize_type=activation_quantize_type,
+            weight_quantize_type=weight_quantize_type,
+            onnx_format=onnx_format,
+            optimize_model=optimize_model)
     post_training_quantization.quantize()
     post_training_quantization.save_quantized_model(
         quantize_model_path,
...
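The hunk above wraps the PostTrainingQuantization constructor in a try/except so the same call path works on Paddle releases that do not yet accept the skip_tensor_list keyword (added in Paddle >= 2.3.1). Below is a minimal, self-contained sketch of this fallback pattern; build_with_optional_kwargs and DummyQuantizer are hypothetical names used only for illustration and are not part of PaddleSlim or Paddle.

# Sketch of the version-compatibility fallback used in the hunk above.
# `build_with_optional_kwargs` and `DummyQuantizer` are hypothetical names.

def build_with_optional_kwargs(cls, required_kwargs, optional_kwargs):
    """Construct cls, dropping optional_kwargs if this version rejects them."""
    try:
        # Newer releases (e.g. Paddle >= 2.3.1 for skip_tensor_list) accept
        # the extra keyword arguments.
        return cls(**required_kwargs, **optional_kwargs)
    except TypeError:
        # Older releases raise TypeError for an unexpected keyword argument;
        # retry with only the arguments every supported version understands.
        return cls(**required_kwargs)


class DummyQuantizer:
    """Stands in for an older constructor that lacks skip_tensor_list."""

    def __init__(self, model_dir, batch_size=16):
        self.model_dir = model_dir
        self.batch_size = batch_size


quantizer = build_with_optional_kwargs(
    DummyQuantizer,
    required_kwargs={"model_dir": "./model", "batch_size": 8},
    optional_kwargs={"skip_tensor_list": None})  # silently dropped here

One design note: the sketch narrows the handler to TypeError, whereas the committed code uses a bare except:, which also triggers the fallback on unrelated constructor failures.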