diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9d849f12a1bfea8e79f484c13438d3329f2bc494..e58400d0f9132814bdb9f96b978a0e8298c1faad 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/Lucas-C/pre-commit-hooks.git - sha: v1.0.1 + rev: v1.3.1 hooks: - id: remove-crlf files: .* @@ -9,12 +9,12 @@ repos: - id: remove-tabs files: \.(md|yml)$ - repo: https://github.com/PaddlePaddle/mirrors-yapf.git - sha: 0d79c0c469bab64f7229c9aca2b1186ef47f0e37 + rev: v0.16.2 hooks: - id: yapf files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$ - repo: https://github.com/pre-commit/pre-commit-hooks - sha: 5bf6c09bfa1297d3692cadd621ef95f1284e33c0 + rev: v4.4.0 hooks: - id: check-added-large-files - id: check-merge-conflict diff --git a/paddleslim/quant/quant_aware_with_infermodel.py b/paddleslim/quant/quant_aware_with_infermodel.py index 83c60b409f5f7baa047e6141e0187ea4ec5e7210..de085a134dae154975944a541477d3046ce90f72 100644 --- a/paddleslim/quant/quant_aware_with_infermodel.py +++ b/paddleslim/quant/quant_aware_with_infermodel.py @@ -37,25 +37,34 @@ _logger = get_logger(__name__, level=logging.INFO) ############################################################################################################ _train_config_default = { # configs of training aware quantization with infermodel - "num_epoch": 1000, # training epoch num - "max_iter": -1, # max training iteration num + "num_epoch": + 1000, # training epoch num + "max_iter": + -1, # max training iteration num "save_iter_step": 1000, # save quant model checkpoint every save_iter_step iteration - "learning_rate": 0.0001, # learning rate - "weight_decay": 0.0001, # weight decay - "use_pact": False, # use pact quantization or not + "learning_rate": + 0.0001, # learning rate + "weight_decay": + 0.0001, # weight decay + "use_pact": + False, # use pact quantization or not # quant model checkpoints save path - "quant_model_ckpt_path": 
"./quant_model_checkpoints/", + "quant_model_ckpt_path": + "./quant_model_checkpoints/", # storage directory of teacher model + teacher model name (excluding suffix) - "teacher_model_path_prefix": None, + "teacher_model_path_prefix": + None, # storage directory of model + model name (excluding suffix) - "model_path_prefix": None, + "model_path_prefix": + None, """ distillation node configuration: the name of the distillation supervision nodes is configured as a list, and the teacher node and student node are arranged in pairs. for example, ["teacher_fc_0.tmp_0", "fc_0.tmp_0", "teacher_batch_norm_24.tmp_4", "batch_norm_24.tmp_4"] """ - "node": None + "node": + None } @@ -184,9 +193,9 @@ def quant_aware_with_infermodel(executor, place, quant_config, scope=scope, - act_preprocess_func=act_preprocess_func, - optimizer_func=optimizer_func, - executor=pact_executor, + act_preprocess_func=None, + optimizer_func=None, + executor=None, for_test=True) train_program = quant_aware( train_program, @@ -225,8 +234,7 @@ def quant_aware_with_infermodel(executor, test_callback(compiled_test_prog, test_feed_names, test_fetch_list, checkpoint_name) iter_sum += 1 - if train_config["max_iter"] >= 0 and iter_sum > train_config[ - "max_iter"]: + if train_config["max_iter"] >= 0 and iter_sum > train_config["max_iter"]: return