From 3800f192b9b348facfaec7b89867e613e7c01922 Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Fri, 10 Jun 2022 22:35:31 -0500
Subject: [PATCH] fix add_n incompatible error (#43395)

---
 paddle/fluid/pybind/eager_method.cc  | 12 ++++++++++++
 python/paddle/optimizer/optimizer.py |  4 +++-
 python/paddle/tensor/math.py         |  3 +++
 3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index ab6b8edd52e..ae00953f2cf 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -1417,6 +1417,16 @@ static PyObject* tensor_method_get_non_zero_cols(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+static PyObject* tensor_method_is_dense(TensorObject* self, PyObject* args,
+                                        PyObject* kwargs) {
+  EAGER_TRY
+  if (!self->tensor.defined()) {
+    return ToPyObject(false);
+  }
+  return ToPyObject(self->tensor.is_dense_tensor());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 static PyObject* tensor_method_is_sparse(TensorObject* self, PyObject* args,
                                          PyObject* kwargs) {
   EAGER_TRY
@@ -1682,6 +1692,8 @@ PyMethodDef variable_methods[] = {
      METH_VARARGS | METH_KEYWORDS, NULL},
     {"clear_gradient", (PyCFunction)(void (*)(void))tensor_clear_gradient,
      METH_VARARGS | METH_KEYWORDS, NULL},
+    {"is_dense", (PyCFunction)(void (*)(void))tensor_method_is_dense,
+     METH_VARARGS | METH_KEYWORDS, NULL},
     {"_zero_grads", (PyCFunction)(void (*)(void))tensor__zero_grads,
      METH_VARARGS | METH_KEYWORDS, NULL},
     {"_share_buffer_to", (PyCFunction)(void (*)(void))tensor__share_buffer_to,
diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py
index ec367c7c710..4534c39b008 100644
--- a/python/paddle/optimizer/optimizer.py
+++ b/python/paddle/optimizer/optimizer.py
@@ -1032,7 +1032,9 @@ class Optimizer(object):
         assert regularization_term is not None
 
         if framework.in_dygraph_mode():
-            return _C_ops.final_state_add_n([grad, regularization_term])
+            if grad.is_dense() and regularization_term.is_dense():
+                return _C_ops.final_state_add_n([grad, regularization_term])
+            return _C_ops.sum([grad, regularization_term])
         elif framework._in_legacy_dygraph():
             return _C_ops.sum([grad, regularization_term])
 
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 4611cbb20c9..ffca233ff16 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -1377,6 +1377,9 @@ def add_n(inputs, name=None):
     if in_dygraph_mode():
         if isinstance(inputs, Variable):
             inputs = [inputs]
+        for x in inputs:
+            if not x.is_dense():
+                return _C_ops.sum(inputs, 'use_mkldnn', False)
         return _C_ops.final_state_add_n(inputs)
     if _in_legacy_dygraph():
         if isinstance(inputs, Variable):
-- 
GitLab
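
For quick reference, below is a minimal Python sketch of the behaviour this patch targets. It is not part of the patch itself, and it assumes a develop build where the eager Tensor exposes to_sparse_coo alongside the new is_dense method: dense inputs keep the final_state_add_n fast path, while any non-dense input makes add_n return _C_ops.sum(inputs, ...) per the diff above.

    # Hedged sketch, not part of the patch: exercises the new dense check.
    import paddle

    x = paddle.ones([3, 3])
    y = paddle.ones([3, 3])

    # Both inputs are dense tensors, so add_n still takes final_state_add_n.
    print(paddle.add_n([x, y]))

    # The new Tensor.is_dense() backs the fallback decision: a sparse COO
    # tensor reports False, so add_n would route it through _C_ops.sum.
    s = x.to_sparse_coo(2)   # to_sparse_coo assumed available on this build
    print(s.is_dense())      # expected: False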