diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 84d0c4471f8df7db969d58216bb3129323fe2e3c..198e042e2c67b82c1556f6ee6ceddef41c642040 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -193,23 +193,53 @@ static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
   }
   if (self->tensor.is_cpu() || self->tensor.is_gpu_pinned()) {
-    auto dense_tensor =
-        std::dynamic_pointer_cast<phi::DenseTensor>(self->tensor.impl());
     platform::CPUPlace place;
-    // deep copy
-    paddle::memory::Copy(place, reinterpret_cast<void*>(
-                                    pybind11::detail::array_proxy(array)->data),
-                         place, dense_tensor->data(), sizeof_dtype * numel);
+    if (self->tensor.is_selected_rows()) {
+      VLOG(6) << "Getting SelectedRows's numpy value";
+      auto* selected_rows =
+          static_cast<phi::SelectedRows*>(self->tensor.impl().get());
+      auto* dense_tensor = static_cast<phi::DenseTensor*>(
+          selected_rows->mutable_value());
+
+      // deep copy
+      paddle::memory::Copy(
+          place,
+          reinterpret_cast<void*>(pybind11::detail::array_proxy(array)->data),
+          place, dense_tensor->data(), sizeof_dtype * numel);
+    } else {
+      VLOG(6) << "Getting DenseTensor's numpy value";
+      auto dense_tensor =
+          std::dynamic_pointer_cast<phi::DenseTensor>(self->tensor.impl());
+      // deep copy
+      paddle::memory::Copy(
+          place,
+          reinterpret_cast<void*>(pybind11::detail::array_proxy(array)->data),
+          place, dense_tensor->data(), sizeof_dtype * numel);
+    }
+
 #if defined(PADDLE_WITH_CUDA)
   } else if (self->tensor.is_gpu()) {
-    auto dense_tensor =
-        std::dynamic_pointer_cast<phi::DenseTensor>(self->tensor.impl());
-
-    paddle::platform::GpuMemcpySync(
-        pybind11::detail::array_proxy(array)->data, dense_tensor->data(),
-        paddle::framework::DataTypeSize(dense_tensor->dtype()) *
-            dense_tensor->numel(),
-        cudaMemcpyDeviceToHost);
+    if (self->tensor.is_selected_rows()) {
+      VLOG(6) << "Getting SelectedRows's numpy value";
+      auto* selected_rows =
+          static_cast<phi::SelectedRows*>(self->tensor.impl().get());
+      auto* dense_tensor = static_cast<phi::DenseTensor*>(
+          selected_rows->mutable_value());
+      paddle::platform::GpuMemcpySync(
+          pybind11::detail::array_proxy(array)->data, dense_tensor->data(),
+          paddle::framework::DataTypeSize(dense_tensor->dtype()) *
+              dense_tensor->numel(),
+          cudaMemcpyDeviceToHost);
+    } else {
+      VLOG(6) << "Getting DenseTensor's numpy value";
+      auto dense_tensor =
+          std::dynamic_pointer_cast<phi::DenseTensor>(self->tensor.impl());
+      paddle::platform::GpuMemcpySync(
+          pybind11::detail::array_proxy(array)->data, dense_tensor->data(),
+          paddle::framework::DataTypeSize(dense_tensor->dtype()) *
+              dense_tensor->numel(),
+          cudaMemcpyDeviceToHost);
+    }
 #endif
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
@@ -1149,6 +1179,26 @@ static PyObject* tensor__inplace_version(TensorObject* self, PyObject* args,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+static PyObject* tensor_method_is_selected_rows(TensorObject* self,
+                                                PyObject* args,
+                                                PyObject* kwargs) {
+  EAGER_TRY
+  return ToPyObject(self->tensor.is_selected_rows());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* tensor_method_get_rows(TensorObject* self, PyObject* args,
+                                        PyObject* kwargs) {
+  EAGER_TRY
+  PADDLE_ENFORCE(self->tensor.is_selected_rows(),
+                 paddle::platform::errors::Fatal(
+                     "this method is only effective for SelectedRows"));
+  auto selected_rows =
+      std::dynamic_pointer_cast<phi::SelectedRows>(self->tensor.impl());
+  return ToPyObject(selected_rows->rows());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 PyMethodDef variable_methods[] = {
     {"numpy", (PyCFunction)(void (*)(void))tensor_method_numpy,
      METH_VARARGS | METH_KEYWORDS, NULL},
@@ -1237,6 +1287,11 @@ PyMethodDef variable_methods[] = {
     /***the method of sparse tensor****/
     {"_inplace_version", (PyCFunction)(void (*)(void))tensor__inplace_version,
      METH_VARARGS | METH_KEYWORDS, NULL},
+    {"is_selected_rows",
+     (PyCFunction)(void (*)(void))tensor_method_is_selected_rows,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"rows", (PyCFunction)(void (*)(void))tensor_method_get_rows,
+     METH_VARARGS | METH_KEYWORDS, NULL},
     {NULL, NULL, 0, NULL}};
 
 }  // namespace pybind
diff --git a/python/paddle/fluid/clip.py b/python/paddle/fluid/clip.py
index 984308498908142da23fc585454623a93cdac906..826deae498cb598028f4922590896d6373ed29d3 100644
--- a/python/paddle/fluid/clip.py
+++ b/python/paddle/fluid/clip.py
@@ -26,7 +26,7 @@ from . import core
 from . import name_scope
 from .dygraph import base as imperative_base
 from .data_feeder import check_variable_and_dtype
-from .framework import _non_static_mode
+from .framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph
 from .layer_helper import LayerHelper
 from .framework import default_main_program
 from paddle import _C_ops
@@ -70,8 +70,14 @@ def _squared_l2_norm(x):
         sum_square = layers.reduce_sum(square)
         return sum_square
 
-    if _non_static_mode():
+    if in_dygraph_mode():
+        if x.is_selected_rows():
+            new_x = paddle.to_tensor(x.numpy())
+            return _C_ops.squared_l2_norm(new_x)
         return _C_ops.squared_l2_norm(x)
+    else:
+        if _in_legacy_dygraph():
+            return _C_ops.squared_l2_norm(x)
 
     op_type = 'squared_l2_norm'
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)
diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index a2a9a7b48c9b691c2198719f8ce0fbbe8534d1e7..2107b12b3a8f525fbf78b038a74a92a8d756f2f5 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -325,7 +325,8 @@ def monkey_patch_varbase():
         if framework._in_eager_mode_:
             if self.grad is None:
                 return None
-            # TODO(wanghuancoder) support SELECTED_ROWS
+            if self.grad.is_selected_rows():
+                return (np.array(self.grad.numpy()), np.array(self.grad.rows()))
             return self.grad.numpy()
         else:
             if self._grad_ivar() is None:
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py
index 97f7162e9979c504de0e29206a7b6d03884e3e19..8bb4088dc3bf9a42725806f2c6393b86965b5ea4 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py
@@ -22,6 +22,7 @@ from paddle.fluid.optimizer import SGDOptimizer
 import numpy as np
 import paddle.fluid.core as core
 import paddle
+from paddle.fluid.framework import _test_eager_guard
 
 
 class SimpleNet(paddle.nn.Layer):
@@ -39,7 +40,7 @@ class SimpleNet(paddle.nn.Layer):
 
 
 class TestSimpleNet(unittest.TestCase):
-    def test_selectedrows_gradient1(self):
+    def func_selectedrows_gradient1(self):
         places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda():
             places.append(fluid.CUDAPlace(0))
@@ -77,7 +78,12 @@ class TestSimpleNet(unittest.TestCase):
             self.assertTrue(input_emb.gradient() is not None)
             paddle.enable_static()
 
-    def test_selectedrows_gradient2(self):
+    def test_selectedrows_gradient1(self):
+        with _test_eager_guard():
+            self.func_selectedrows_gradient1()
+        self.func_selectedrows_gradient1()
+
+    def func_selectedrows_gradient2(self):
         places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda():
             places.append(fluid.CUDAPlace(0))
@@ -113,6 +119,11 @@ class TestSimpleNet(unittest.TestCase):
                 input_emb.clear_gradient()
                 self.assertTrue(input_emb.gradient() is not None)
 
+    def test_selectedrows_gradient2(self):
+        with _test_eager_guard():
+            self.func_selectedrows_gradient2()
+        self.func_selectedrows_gradient2()
+
 
 if __name__ == '__main__':
     unittest.main()
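
Usage sketch (illustrative, not part of the diff): the snippet below exercises the new eager-mode Tensor.is_selected_rows() / Tensor.rows() methods and the (values, rows) tuple that gradient() now returns for SelectedRows gradients. The embedding size, the input ids, and the sparse=True setting are assumed values, chosen so the weight's gradient comes back as SelectedRows in dygraph mode.

    import numpy as np
    import paddle
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        # sparse=True makes the embedding weight's gradient a SelectedRows.
        embedding = paddle.nn.Embedding(20, 32, sparse=True)
        ids = paddle.to_tensor(np.array([[0, 3], [5, 9]], dtype='int64'))
        out = embedding(ids)
        paddle.sum(out).backward()

        grad = embedding.weight.grad
        assert grad.is_selected_rows()  # new method from this diff
        print(grad.rows())              # rows touched by the backward pass
        # gradient() now returns (values, rows) for SelectedRows grads
        values, rows = embedding.weight.gradient()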