From fc891aed7befc3bcbe911d0563ccb2f18c00e22c Mon Sep 17 00:00:00 2001
From: xiongkun
Date: Fri, 20 May 2022 08:48:31 +0000
Subject: [PATCH] sync stop_gradient in ParamBase. Fix the Different Behavior
 between Eval and Train

---
 python/paddle/fluid/dygraph/varbase_patch_methods.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index a93facbc34a..ac304e289e9 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -101,8 +101,11 @@ def monkey_patch_varbase():
         # Note: getattr(self, attr, None) will call x.grad=x.gradient(), but gradient() only available in dygraph.
         # It will fail. So, for propery that different between dynamic and static graph, should not getattr(self, attr, None).
         attr_not_need_keys = ['grad', 'T', 'place', '_place_str']
+        param_keys = ['stop_gradient', 'trainable']
         if isinstance(self, (ParamBase, EagerParamBase)):
             attr_kwargs = self.__dict__.copy()
+            for key in param_keys:
+                attr_kwargs[key] = getattr(self, key)
         else:
             attr_names = []
             for name in dir(self):
--
GitLab
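
Reviewer note: a minimal self-contained sketch of the bug this patch addresses. FakeParamBase below is a hypothetical stand-in; on the real ParamBase, stop_gradient is a property backed by C++ state rather than a plain instance attribute, so self.__dict__.copy() silently drops it when building attr_kwargs:

    # Sketch only: FakeParamBase is a made-up stand-in for ParamBase.
    class FakeParamBase:
        def __init__(self):
            # Backing store; note the name is '_stop_gradient',
            # so it never appears in __dict__ as 'stop_gradient'.
            self._stop_gradient = False

        @property
        def stop_gradient(self):
            return self._stop_gradient


    param = FakeParamBase()
    attr_kwargs = param.__dict__.copy()
    print('stop_gradient' in attr_kwargs)  # False: the property is missed

    # The patch's remedy: copy the property values explicitly.
    for key in ['stop_gradient']:
        attr_kwargs[key] = getattr(param, key)
    print(attr_kwargs['stop_gradient'])    # False, now carried over

Under that reading, without the explicit getattr copy a parameter's stop_gradient (and trainable) setting never reaches the converted static variable, which would account for the eval/train behavior difference named in the subject line.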