Unverified commit 0d5c27b2, authored by zhangbo9674, committed by GitHub

fix adam is_sparse bug in final state dygraph (#41125)

Parent: 2f1c1ae5
@@ -15,7 +15,7 @@
 from .optimizer import Optimizer
 from ..fluid import core
 from ..fluid import framework
-from ..fluid.framework import Variable
+from ..fluid.framework import Variable, _in_legacy_dygraph, in_dygraph_mode
 from ..fluid import layers
 from ..fluid import unique_name
 from ..fluid.layer_helper import LayerHelper
@@ -431,11 +431,20 @@ class Adam(Optimizer):
                     continue
                 if param._grad_ivar() is not None:
                     grad_var = param._grad_ivar()
-                    if hasattr(grad_var, "_is_sparse") and grad_var._is_sparse(
-                    ) and self.regularization is not None:
-                        raise RuntimeError(
-                            "Adam don't support weight_decay with sparse parameters, please set it to None."
-                        )
+                    if in_dygraph_mode():
+                        if hasattr(grad_var, "is_selected_rows"
+                                   ) and grad_var.is_selected_rows(
+                                   ) and self.regularization is not None:
+                            raise RuntimeError(
+                                "Adam don't support weight_decay with sparse parameters, please set it to None."
+                            )
+                    else:
+                        if hasattr(grad_var,
+                                   "_is_sparse") and grad_var._is_sparse(
+                                   ) and self.regularization is not None:
+                            raise RuntimeError(
+                                "Adam don't support weight_decay with sparse parameters, please set it to None."
+                            )
                     params_grads.append((param, grad_var))
         optimize_ops = self._apply_optimize(
...
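
For context, here is a minimal sketch of the scenario this check guards against, assuming a Paddle 2.x dygraph build; the `Embedding` layer, dimensions, and optimizer arguments below are illustrative and not part of the commit. A layer created with `sparse=True` produces SelectedRows gradients; eager (final state) tensors expose `is_selected_rows()` rather than the legacy `_is_sparse` attribute, which is why the fix branches on `in_dygraph_mode()`.

```python
# Hypothetical repro sketch (not from the commit): exercises the check
# that this fix restores in final state dygraph.
import paddle

# sparse=True makes the embedding gradient a SelectedRows (sparse) tensor.
emb = paddle.nn.Embedding(100, 16, sparse=True)

# weight_decay populates self.regularization inside the optimizer, which
# Adam cannot combine with sparse gradients.
opt = paddle.optimizer.Adam(parameters=emb.parameters(), weight_decay=0.01)

ids = paddle.to_tensor([[1, 2, 3]], dtype='int64')
loss = emb(ids).sum()
loss.backward()

try:
    opt.step()
except RuntimeError as e:
    # Before this fix, final state dygraph probed the legacy `_is_sparse`
    # attribute, which eager tensors lack, so the error was silently
    # skipped; after the fix, `is_selected_rows()` is checked instead.
    print(e)
```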