diff --git a/paddlehub/finetune/strategy.py b/paddlehub/finetune/strategy.py
index 5c7b59258ea9f1583443dae743af166237c30931..9073f9414e4b44078c59dfa5cb251cc031456a9f 100644
--- a/paddlehub/finetune/strategy.py
+++ b/paddlehub/finetune/strategy.py
@@ -120,9 +120,7 @@ def get_depth_parameter(main_program):
     return updated_depth_params_dict
 
 
-def set_gradual_unfreeze(main_program, unfreeze_depths):
-    depth_params_dict = get_depth_parameter(main_program)
-
+def set_gradual_unfreeze(depth_params_dict, unfreeze_depths):
     for depth in unfreeze_depths:
         for index, param in enumerate(depth_params_dict[depth]):
             depth_params_dict[depth][index].stop_gradient = False
@@ -509,7 +507,7 @@ class CombinedStrategy(DefaultStrategy):
             if self.max_depth > 0 and self.epoch <= self.scheduler[
                     "gradual_unfreeze"]["blocks"]:
                 set_gradual_unfreeze(
-                    self.main_program,
+                    depth_params_dict=self.depth_params_dict,
                     unfreeze_depths=self.
                     sorted_depth[:self.max_depth * self.epoch //
                                  self.scheduler["gradual_unfreeze"]["blocks"]])
diff --git a/paddlehub/module/manager.py b/paddlehub/module/manager.py
index 6509f7aecd0b6bebf5594a1f70b1f7ecaf20c8c7..f555a198ed1a21a6ab78ee7e1be72ca9c1b8c236 100644
--- a/paddlehub/module/manager.py
+++ b/paddlehub/module/manager.py
@@ -76,7 +76,7 @@ class LocalModuleManager(object):
                             sys.modules[_item.__module__].__file__)
                         if issubclass(
                                 _item,
-                                hub.Module) and _file.startwith(module_file):
+                                hub.Module) and _file.startswith(module_file):
                             version = _item._version
                             break
                 sys.path.pop(0)
diff --git a/paddlehub/module/module.py b/paddlehub/module/module.py
index 196b43fdaa1ee7577c8e568f3d9751de88bb34d0..eb4526c0f75c8dafe8f2013b2b6ba0fbf50ca38a 100644
--- a/paddlehub/module/module.py
+++ b/paddlehub/module/module.py
@@ -137,7 +137,8 @@ class Module(object):
         _run_func_name = self._get_func_name(self.__class__,
                                              _module_runnable_func)
-        self._run_func = getattr(self, _run_func_name)
+        self._run_func = getattr(self,
+                                 _run_func_name) if _run_func_name else None
         self._serving_func_name = self._get_func_name(self.__class__,
                                                       _module_serving_func)
         self._directory = directory
diff --git a/paddlehub/module/nlp_module.py b/paddlehub/module/nlp_module.py
index d29f455cc6596e75c698ed992f8506a828614504..65479b14d207ad6332d37511938176c8314ff047 100644
--- a/paddlehub/module/nlp_module.py
+++ b/paddlehub/module/nlp_module.py
@@ -26,6 +26,7 @@ import six
 import numpy as np
 
 import paddle.fluid as fluid
+from paddlehub.common import paddle_helper
 from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
 import paddlehub as hub
 from paddlehub.common.logger import logger
@@ -265,6 +266,9 @@ class TransformerModule(NLPBaseModule):
             logger.info("Load pretraining parameters from {}.".format(
                 pretraining_params_path))
 
+    def param_prefix(self):
+        return "@HUB_%s@" % self.name
+
     def context(
             self,
             max_seq_len=128,
@@ -330,8 +334,13 @@ class TransformerModule(NLPBaseModule):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
 
+        # To be compatible with the module v1
+        vars = filter(lambda var: "tmp" not in var,
+                      list(module_program.global_block().vars.keys())[4:])
+        paddle_helper.add_vars_prefix(
+            program=module_program, prefix=self.param_prefix(), vars=vars)
         self.init_pretraining_params(
-            exe, self.params_path, main_program=startup_program)
+            exe, self.params_path, main_program=module_program)
 
         self.params_layer = {}
         for param in module_program.global_block().iter_parameters():
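
Below is a minimal, framework-free sketch of the gradual-unfreeze refactor in strategy.py: the caller now builds the depth-to-parameters mapping once and passes it in, instead of set_gradual_unfreeze recomputing it from main_program on every call. The Parameter stand-in class and the example dict are hypothetical illustrations, not Paddle code; only the set_gradual_unfreeze signature mirrors the patch.

    # Not part of the patch: a sketch of the technique, under assumptions.
    class Parameter(object):
        """Stand-in for a framework parameter with a stop_gradient flag."""

        def __init__(self, name):
            self.name = name
            self.stop_gradient = True  # frozen until explicitly unfrozen


    def set_gradual_unfreeze(depth_params_dict, unfreeze_depths):
        # Unfreeze every parameter belonging to the selected depths.
        for depth in unfreeze_depths:
            for param in depth_params_dict[depth]:
                param.stop_gradient = False


    # Build the depth -> parameters mapping once; the patch caches it as
    # self.depth_params_dict rather than re-scanning the program per epoch.
    depth_params_dict = {
        1: [Parameter("embedding_0")],
        2: [Parameter("encoder_layer_0_w"), Parameter("encoder_layer_0_b")],
        3: [Parameter("encoder_layer_1_w"), Parameter("encoder_layer_1_b")],
    }
    sorted_depth = sorted(depth_params_dict, reverse=True)  # deepest first

    max_depth, blocks = len(sorted_depth), 3
    for epoch in range(1, blocks + 1):
        set_gradual_unfreeze(
            depth_params_dict=depth_params_dict,
            unfreeze_depths=sorted_depth[:max_depth * epoch // blocks])
        trainable = sorted(p.name for params in depth_params_dict.values()
                           for p in params if not p.stop_gradient)
        print("epoch %d: trainable -> %s" % (epoch, trainable))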
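
And a sketch of the naming scheme behind param_prefix in nlp_module.py: prefixing a module's variables with "@HUB_<name>@" keeps two modules loaded into one program from colliding on parameter names. The dict-based add_vars_prefix below is a hypothetical stand-in for paddle_helper.add_vars_prefix, which operates on a real Program; the "tmp" filter mirrors the one in the patch.

    # Not part of the patch: a toy model of variable-name prefixing.
    def param_prefix(module_name):
        return "@HUB_%s@" % module_name


    def add_vars_prefix(var_dict, prefix, vars=None):
        # Rename the selected variables in place by prepending the prefix.
        targets = vars if vars is not None else list(var_dict.keys())
        for name in targets:
            var_dict[prefix + name] = var_dict.pop(name)
        return var_dict


    # Two modules that would otherwise both define "encoder_layer_0_w":
    ernie_vars = {"encoder_layer_0_w": "...", "tmp_0": "..."}
    bert_vars = {"encoder_layer_0_w": "...", "tmp_0": "..."}

    # Skip temporary variables, as the patch's "tmp" filter does.
    ernie_keep = [name for name in ernie_vars if "tmp" not in name]
    bert_keep = [name for name in bert_vars if "tmp" not in name]

    add_vars_prefix(ernie_vars, param_prefix("ernie"), vars=ernie_keep)
    add_vars_prefix(bert_vars, param_prefix("bert"), vars=bert_keep)

    print(sorted(ernie_vars))  # ['@HUB_ernie@encoder_layer_0_w', 'tmp_0']
    print(sorted(bert_vars))   # ['@HUB_bert@encoder_layer_0_w', 'tmp_0']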