未验证 提交 82da1f14 编写于 作者: C ceci3 提交者: GitHub

fix unittest (#1633)

上级 cb57443e
repos:
- repo: https://github.com/Lucas-C/pre-commit-hooks.git
sha: v1.0.1
rev: v1.3.1
hooks:
- id: remove-crlf
files: .*
......@@ -9,12 +9,12 @@ repos:
- id: remove-tabs
files: \.(md|yml)$
- repo: https://github.com/PaddlePaddle/mirrors-yapf.git
sha: 0d79c0c469bab64f7229c9aca2b1186ef47f0e37
rev: v0.16.2
hooks:
- id: yapf
files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
- repo: https://github.com/pre-commit/pre-commit-hooks
sha: 5bf6c09bfa1297d3692cadd621ef95f1284e33c0
rev: v4.4.0
hooks:
- id: check-added-large-files
- id: check-merge-conflict
......
......@@ -37,25 +37,34 @@ _logger = get_logger(__name__, level=logging.INFO)
############################################################################################################
_train_config_default = {
# configs of training aware quantization with infermodel
"num_epoch": 1000, # training epoch num
"max_iter": -1, # max training iteration num
"num_epoch":
1000, # training epoch num
"max_iter":
-1, # max training iteration num
"save_iter_step":
1000, # save quant model checkpoint every save_iter_step iteration
"learning_rate": 0.0001, # learning rate
"weight_decay": 0.0001, # weight decay
"use_pact": False, # use pact quantization or not
"learning_rate":
0.0001, # learning rate
"weight_decay":
0.0001, # weight decay
"use_pact":
False, # use pact quantization or not
# quant model checkpoints save path
"quant_model_ckpt_path": "./quant_model_checkpoints/",
"quant_model_ckpt_path":
"./quant_model_checkpoints/",
# storage directory of teacher model + teacher model name (excluding suffix)
"teacher_model_path_prefix": None,
"teacher_model_path_prefix":
None,
# storage directory of model + model name (excluding suffix)
"model_path_prefix": None,
"model_path_prefix":
None,
""" distillation node configuration:
the name of the distillation supervision nodes is configured as a list,
and the teacher node and student node are arranged in pairs.
for example, ["teacher_fc_0.tmp_0", "fc_0.tmp_0", "teacher_batch_norm_24.tmp_4", "batch_norm_24.tmp_4"]
"""
"node": None
"node":
None
}
......@@ -184,9 +193,9 @@ def quant_aware_with_infermodel(executor,
place,
quant_config,
scope=scope,
act_preprocess_func=act_preprocess_func,
optimizer_func=optimizer_func,
executor=pact_executor,
act_preprocess_func=None,
optimizer_func=None,
executor=None,
for_test=True)
train_program = quant_aware(
train_program,
......@@ -225,8 +234,7 @@ def quant_aware_with_infermodel(executor,
test_callback(compiled_test_prog, test_feed_names,
test_fetch_list, checkpoint_name)
iter_sum += 1
if train_config["max_iter"] >= 0 and iter_sum > train_config[
"max_iter"]:
if train_config["max_iter"] >= 0 and iter_sum > train_config["max_iter"]:
return
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册