From 30f2fb95830dab7894c3e5b120146a2bd8ed11f0 Mon Sep 17 00:00:00 2001
From: ceci3
Date: Mon, 4 Jul 2022 17:42:57 +0800
Subject: [PATCH] revert choose ptq_hpo when model_type is transformer (#1238) (#1240)

---
 paddleslim/auto_compression/auto_strategy.py | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/paddleslim/auto_compression/auto_strategy.py b/paddleslim/auto_compression/auto_strategy.py
index a1eb0b0f..2826601a 100644
--- a/paddleslim/auto_compression/auto_strategy.py
+++ b/paddleslim/auto_compression/auto_strategy.py
@@ -241,16 +241,6 @@ def prepare_strategy(executor,
 
 def get_final_quant_config(ptq_loss, model_type=None):
     """ transform quantization tester config to real quantization config """
-    ### use ptq & hpo when model_type is transformer
-    if model_type == 'transformer':
-        quant_config = Quantization(**default_quant_config)
-        hpo_config = HyperParameterOptimization(**default_hpo_config)
-        configs = [{
-            'Quantization': quant_config,
-            'HyperParameterOptimization': hpo_config
-        }]
-        return configs
-
     ### if emd loss less than MAGIC_MIN_EMD_DISTANCE, final compress.
     if ptq_loss < MAGIC_MIN_EMD_DISTANCE:
         return None
-- 
GitLab
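
For reference, below is a minimal standalone sketch of the branch this patch removes: when model_type was 'transformer', get_final_quant_config returned a post-training quantization config paired with hyper-parameter optimization instead of falling through to the EMD-loss check. The import paths and the location of default_quant_config / default_hpo_config are assumptions inferred from the file path and names in the hunk, not something the patch itself states.

# Hedged sketch reconstructing the reverted transformer-specific PTQ + HPO branch.
# Assumptions: Quantization and HyperParameterOptimization are exported by
# paddleslim.auto_compression, and default_quant_config / default_hpo_config are
# available in paddleslim.auto_compression.auto_strategy, as the removed lines imply.
from paddleslim.auto_compression import Quantization, HyperParameterOptimization
from paddleslim.auto_compression.auto_strategy import (default_hpo_config,
                                                       default_quant_config)


def transformer_ptq_hpo_config():
    """Build the PTQ + HPO config list that the reverted branch returned
    when model_type == 'transformer'."""
    quant_config = Quantization(**default_quant_config)
    hpo_config = HyperParameterOptimization(**default_hpo_config)
    return [{
        'Quantization': quant_config,
        'HyperParameterOptimization': hpo_config
    }]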