Unverified commit e3a06180, authored by ceci3, committed by GitHub

revert choose ptq_hpo when model_type is transformer (#1238)

Parent 52a13894
@@ -241,16 +241,6 @@ def prepare_strategy(executor,
 def get_final_quant_config(ptq_loss, model_type=None):
     """ transform quantization tester config to real quantization config """
-    ### use ptq & hpo when model_type is transformer
-    if model_type == 'transformer':
-        quant_config = Quantization(**default_quant_config)
-        hpo_config = HyperParameterOptimization(**default_hpo_config)
-        configs = [{
-            'Quantization': quant_config,
-            'HyperParameterOptimization': hpo_config
-        }]
-        return configs
     ### if emd loss less than MAGIC_MIN_EMD_DISTANCE, final compress.
     if ptq_loss < MAGIC_MIN_EMD_DISTANCE:
         return None
...
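For context, the removed branch short-circuited `get_final_quant_config` for transformer models, returning a single strategy entry that pairs post-training quantization (PTQ) with hyper-parameter optimization (HPO); after the revert, the decision depends only on whether `ptq_loss` is below `MAGIC_MIN_EMD_DISTANCE`. Below is a minimal, self-contained sketch of the config shape that branch produced. The `Quantization` and `HyperParameterOptimization` stand-in classes and the `default_quant_config` / `default_hpo_config` values are hypothetical placeholders; the real classes and module-level defaults live in PaddleSlim's auto-compression code and are not shown in this diff.

class Quantization:
    # Hypothetical stand-in for PaddleSlim's Quantization config class.
    def __init__(self, **kwargs):
        self.kwargs = kwargs

class HyperParameterOptimization:
    # Hypothetical stand-in for PaddleSlim's HyperParameterOptimization config class.
    def __init__(self, **kwargs):
        self.kwargs = kwargs

# Assumed placeholder defaults; the actual values are defined elsewhere in the module.
default_quant_config = {'quantize_op_types': ['conv2d', 'mul']}
default_hpo_config = {'max_quant_count': 20}

# Before the revert, model_type == 'transformer' returned this PTQ+HPO pairing:
configs = [{
    'Quantization': Quantization(**default_quant_config),
    'HyperParameterOptimization': HyperParameterOptimization(**default_hpo_config),
}]
print(configs)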