From e3ee2ad845d6169f2596ec850a6527aca4330478 Mon Sep 17 00:00:00 2001
From: xiongkun
Date: Mon, 23 May 2022 22:02:01 +0800
Subject: [PATCH] sync stop_gradient in ParamBase. Fix the Different Behavior
 between Eval and Train (#42899)

---
 python/paddle/fluid/dygraph/varbase_patch_methods.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index 8049a8b8741..add3d73efc7 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -101,8 +101,11 @@ def monkey_patch_varbase():
         # Note: getattr(self, attr, None) will call x.grad=x.gradient(), but gradient() only available in dygraph.
         # It will fail. So, for propery that different between dynamic and static graph, should not getattr(self, attr, None).
         attr_not_need_keys = ['grad', 'T', 'place', '_place_str']
+        param_keys = ['stop_gradient', 'trainable']
         if isinstance(self, (ParamBase, EagerParamBase)):
             attr_kwargs = self.__dict__.copy()
+            for key in param_keys:
+                attr_kwargs[key] = getattr(self, key)
         else:
             attr_names = []
             for name in dir(self):
-- 
GitLab
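
For context, a minimal sketch (plain Python, not Paddle's API) of the pitfall the hunk above fixes: values exposed through properties, as `stop_gradient` and `trainable` are on `ParamBase` where the backing state lives on the C++ side, never appear in the instance `__dict__`, so `self.__dict__.copy()` silently drops them when a parameter's attributes are gathered. The `FakeParam` class and its names below are hypothetical stand-ins, not Paddle code.

```python
# Hypothetical stand-in for ParamBase: 'stop_gradient' is a property, so the
# public key never appears in __dict__. (In real ParamBase the backing state
# is held on the C++ side, so it is likewise invisible to __dict__.)
class FakeParam:
    def __init__(self):
        self._stop_gradient = False
        self.name = "w_0"  # an ordinary instance attribute

    @property
    def stop_gradient(self):
        return self._stop_gradient

    @stop_gradient.setter
    def stop_gradient(self, value):
        self._stop_gradient = value


p = FakeParam()
p.stop_gradient = True  # e.g. flipped when switching between train and eval

attr_kwargs = p.__dict__.copy()
print('stop_gradient' in attr_kwargs)  # False: the property key was dropped

# The patch's remedy: read the property-backed keys explicitly via getattr.
for key in ['stop_gradient']:  # the real patch also syncs 'trainable'
    attr_kwargs[key] = getattr(p, key)
print(attr_kwargs['stop_gradient'])  # True: the current state is carried over
```

Without this explicit sync, a parameter whose `stop_gradient` had been toggled would be reconstructed with a stale gradient setting, which is the train/eval divergence named in the subject line.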