diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index b254b5d41d3ab41b7702e8848247df0dcd00a997..bb3464665411c80959130c5f6cd3414fccbda8bc 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -34,6 +34,10 @@ limitations under the License. */
 namespace paddle {
 namespace pybind {
 
+extern void InitEagerTensorWithNumpyValue(EagerTensorObject* self,
+                                          const pybind11::object& array,
+                                          bool zero_copy);
+
 extern PyTypeObject* p_eager_tensor_type;
 
 static PyObject* eager_tensor_method_numpy(EagerTensorObject* self,
@@ -359,6 +363,20 @@ static PyObject* eager_tensor_method_get_underline_tensor(
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+// NOTE(wuweilong): Sets the value without changing self's original place.
+static PyObject* eager_tensor_method_set_value(EagerTensorObject* self,
+                                               PyObject* args,
+                                               PyObject* kwargs) {
+  EAGER_TRY
+  VLOG(4) << "Value " << self->eager_tensor.name();
+  pybind11::object numpy_value =
+      pybind11::object(pybind11::handle(PyTuple_GET_ITEM(args, 0)), true);
+  InitEagerTensorWithNumpyValue(self, numpy_value, false);
+  Py_INCREF(Py_None);
+  return Py_None;
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 PyMethodDef variable_methods[] = {
     {"numpy", (PyCFunction)(void (*)(void))eager_tensor_method_numpy,
      METH_VARARGS | METH_KEYWORDS, NULL},
@@ -393,6 +411,8 @@ PyMethodDef variable_methods[] = {
     {"get_tensor",
      (PyCFunction)(void (*)(void))eager_tensor_method_get_underline_tensor,
      METH_VARARGS | METH_KEYWORDS, NULL},
+    {"_set_value", (PyCFunction)(void (*)(void))eager_tensor_method_set_value,
+     METH_VARARGS | METH_KEYWORDS, NULL},
     {NULL, NULL, 0, NULL}};
 
 }  // namespace pybind
diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index 8fc6bd818bc8fe81b56f80f681bfdcc88f3869b7..f5d569828775e6bcc90ffecb3d820696bf0e56c0 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -180,6 +180,10 @@ def monkey_patch_varbase():
             "Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format(
                 self.name, self_tensor_np.dtype, value_np.dtype)
 
+        # NOTE(wuweilong): self can be a VarBase or an EagerTensor, and the subsequent behavior is defined in different files:
+        # if self is a VarBase, value() returns a Variable bound in imperative.cc, and get_tensor() is bound in pybind.cc;
+        # if self is an EagerTensor, value() returns self as defined in this file, and get_tensor() is defined in eager_method.cc.
+        # This interface behavior will be unified in the future.
         self.value().get_tensor().set(value_np,
                                       framework._current_expected_place())
 
diff --git a/python/paddle/fluid/tests/unittests/test_egr_python_api.py b/python/paddle/fluid/tests/unittests/test_egr_python_api.py
index ba0421d6eb32d3a95b79d264139d17ed1bd22d2f..d6bf768bee7744524d33082b2cda81ea4870e534 100644
--- a/python/paddle/fluid/tests/unittests/test_egr_python_api.py
+++ b/python/paddle/fluid/tests/unittests/test_egr_python_api.py
@@ -763,6 +763,24 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
             paddle.fluid.framework._current_expected_place())
         self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized())
 
+    def test_set_value(self):
+        with _test_eager_guard():
+            ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
+            egr_tensor = core.eager.EagerTensor(value=ori_arr)
+            self.assertEqual(egr_tensor.stop_gradient, True)
+            self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
+            self.assertTrue(np.array_equal(egr_tensor.numpy(), ori_arr))
+            ori_place = egr_tensor.place
+
+            new_arr = np.random.rand(4, 4, 16, 32).astype('float32')
+            self.assertFalse(np.array_equal(egr_tensor.numpy(), new_arr))
+
+            egr_tensor._set_value(new_arr)
+            self.assertEqual(egr_tensor.stop_gradient, True)
+            self.assertTrue(egr_tensor.place._equals(ori_place))
+            self.assertEqual(egr_tensor.shape, [4, 4, 16, 32])
+            self.assertTrue(np.array_equal(egr_tensor.numpy(), new_arr))
+
 
 class EagerParamBaseUsageTestCase(unittest.TestCase):
     def test_print(self):
@@ -856,6 +874,17 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
         egr_tensor12.backward()
         self.assertTrue(np.array_equal(egr_tensor12.gradient(), arr))
 
+    def test_set_value(self):
+        with _test_eager_guard():
+            linear = paddle.nn.Linear(1, 3)
+            ori_place = linear.weight.place
+            new_weight = np.ones([1, 3]).astype('float32')
+            self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight))
+
+            linear.weight._set_value(new_weight)
+            self.assertTrue(np.array_equal(linear.weight.numpy(), new_weight))
+            self.assertTrue(linear.weight.place._equals(ori_place))
+
 
 class EagerGuardTestCase(unittest.TestCase):
     def test__test_eager_guard(self):
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py b/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py
index c18dab61fc5ab3d7f0ae98c2149185cd83ec4b39..0bc56294876d3f8bd6fd780f5e0f482e907a5f2a 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py
@@ -21,6 +21,7 @@ import paddle.nn as nn
 import paddle.fluid as fluid
 
 import numpy as np
+from paddle.fluid.framework import _test_eager_guard
 
 
 class LeNetDygraph(fluid.dygraph.Layer):
@@ -70,7 +71,7 @@ def init_weights(layer):
 
 
 class TestLayerApply(unittest.TestCase):
-    def test_apply_init_weight(self):
+    def func_apply_init_weight(self):
         with fluid.dygraph.guard():
             net = LeNetDygraph()
 
@@ -84,6 +85,11 @@ class TestLayerApply(unittest.TestCase):
             np.testing.assert_allclose(layer.weight.numpy(), 0.7)
             np.testing.assert_allclose(layer.bias.numpy(), -0.2)
 
+    def test_apply_init_weight(self):
+        with _test_eager_guard():
+            self.func_apply_init_weight()
+        self.func_apply_init_weight()
+
 
 if __name__ == '__main__':
     unittest.main()
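
Reviewer note: below is a minimal usage sketch of the new `_set_value` binding, distilled from the unit tests in this diff. It is an illustration, not part of the change; `core.eager.EagerTensor(value=...)`, `_test_eager_guard`, `place._equals`, and the list-valued `shape` property are used exactly as they appear in the test files.

```python
# Sketch only: mirrors test_set_value above. _set_value routes the numpy array
# through InitEagerTensorWithNumpyValue with zero_copy=False (see the C++ diff),
# so the data is copied in and the tensor keeps its original place even when
# the shape changes.
import numpy as np
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    t = core.eager.EagerTensor(value=np.zeros([2, 3]).astype('float32'))
    ori_place = t.place

    t._set_value(np.ones([4, 3]).astype('float32'))
    assert t.place._equals(ori_place)  # place is preserved
    assert t.shape == [4, 3]           # shape may change
    assert np.array_equal(t.numpy(), np.ones([4, 3], dtype='float32'))
```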