Unverified · Commit 8c5c1046 authored by Weilong Wu, committed by GitHub

Support test_imperative apply and add a setter for EagerTensor (#39016)

* Rearranged Eager AutoCodeGen directory structure

* Removed USE_OP in Eager AutoCodeGen

* Enabled generation for Operators without Grad/Inputs/Outputs

* Resolved operators without input

* Fixed merge conflicts

* Enabled Eager AutoCodeGen for 10+ more operators

* Refactored Eager AutoCodeGen with more organized helper objects

* Enabled Eager AutoCodeGen for operators with multiple OpBases

* Adjusted Eager AutoCodeGen to Enable Passing Output Tensor as Input Argument

* Handled Dispensable Inputs/Outputs in Eager AutoCodeGen

* Adjusted function generation/call between Python-C API & Dygraph API

* Synchronized auto-generated Python-C API with Dygraph Forward Functions

* support more eager tensor api

* fix merge compile error

* fix compile error and fit develop code

* support pure CPU

* fix some logic error in eager_mode

* support _varbase_creator in eager mode

* Added safe_initialized interface to EagerTensor for use in processing dispensable inputs

* for eager mode

* refine

* support multiple constructor for eager tensor

* add place related code

* polish code

* specify randint with dtype of int64

* Support pure cpu test

* eager logic

* refine test in pure cpu

* eager logic

* eager logic

* eager logic, test=develop

* skip core.eager when in inference, test=develop

* refine, test=develop

* refine, test=develop

* call RetainGrad after run forward kernel, test=develop

* refine, test=develop

* support dygraph util, meta, guard test

* eager test case

* support inference test

* refine test and fix initializer failure

* modify eagertensor patch method

* add eagertensor.clear_gradient, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* support create varbase and fix retain grad error

* call monkey_patch_varbase in _test_eager_guard, test=develop

* fix windows error

* split clear_gradient to clear_gradient and zero_grads, test=develop

* refine, test=develop

* refine, test=develop

* support test_imperative_basic test in eager mode

* remove additional log in variable.h

* remove additional log in variable.h

* remove additional code create in merge

* eager

* fix some eager logic, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* patch_tensor_method_func, test=develop

* refine, test=develop

* eager test case, test=develop

* refine, test=develop

* eager, test=develop

* eager, test=develop

* eager optimizer, test=develop

* eager optimizer, test=develop

* eager test_imperative_optimizer_v2, test=develop

* eager, test=develop

* refine, test=develop

* refine, test=develop

* eager, test=develop

* add resize in _share_buffer_to, test=develop

* eager, test=develop

* fix _share_buffer_to, test=develop

* refine, test=develop

* refine, test=develop

* support eager for dataloader, test=develop

* Exposed EagerTensor's set func to implement set_value func

* Rename set to _set_value and supplement the corresponding test case

* fix test concat dev api build failure

* fix conflict

* fix conflict

* Use extern to polish code
Co-authored-by: jim19930609 <jim19930609@gmail.com>
Co-authored-by: JiabinYang <360788950@qq.com>
Co-authored-by: Wang Huan <wanghuan29@baidu.com>
Co-authored-by: wanghuancoder <wanghuancoder@163.com>
Co-authored-by: chentianyu03 <chentianyu03@baidu.com>
Parent 85334b04
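
The user-visible piece of this commit is the new EagerTensor._set_value binding, plus eager-mode coverage for Layer.apply. A minimal usage sketch, grounded in the tests added in the diff below (an in-place write that keeps the tensor's place, while the shape follows the new array):

import numpy as np
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    t = core.eager.EagerTensor(value=np.zeros([2, 3], dtype='float32'))
    place_before = t.place
    t._set_value(np.ones([4, 3], dtype='float32'))  # overwrite the data in place
    assert t.shape == [4, 3]                        # shape follows the new array
    assert t.place._equals(place_before)            # original place is preserved
    assert np.array_equal(t.numpy(), np.ones([4, 3], dtype='float32'))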
@@ -34,6 +34,10 @@ limitations under the License. */
namespace paddle {
namespace pybind {

extern void InitEagerTensorWithNumpyValue(EagerTensorObject* self,
                                          const pybind11::object& array,
                                          bool zero_copy);

extern PyTypeObject* p_eager_tensor_type;
static PyObject* eager_tensor_method_numpy(EagerTensorObject* self,
@@ -359,6 +363,20 @@ static PyObject* eager_tensor_method_get_underline_tensor(
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

// NOTE(wuweilong): Set value and not change self's original place
static PyObject* eager_tensor_method_set_value(EagerTensorObject* self,
                                               PyObject* args,
                                               PyObject* kwargs) {
  EAGER_TRY
  VLOG(4) << "Value " << self->eager_tensor.name();
  pybind11::object numpy_value =
      pybind11::object(pybind11::handle(PyTuple_GET_ITEM(args, 0)), true);
  InitEagerTensorWithNumpyValue(self, numpy_value, false);
  Py_INCREF(Py_None);
  return Py_None;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyMethodDef variable_methods[] = {
    {"numpy", (PyCFunction)(void (*)(void))eager_tensor_method_numpy,
     METH_VARARGS | METH_KEYWORDS, NULL},
@@ -393,6 +411,8 @@ PyMethodDef variable_methods[] = {
    {"get_tensor",
     (PyCFunction)(void (*)(void))eager_tensor_method_get_underline_tensor,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"_set_value", (PyCFunction)(void (*)(void))eager_tensor_method_set_value,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {NULL, NULL, 0, NULL}};
} // namespace pybind
......
@@ -180,6 +180,10 @@ def monkey_patch_varbase():
            "Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format(
                self.name, self_tensor_np.dtype, value_np.dtype)

        # NOTE(wuweilong): self could be VarBase or EagerTensor, and the subsequent behavior is defined in different files:
        # if self is VarBase, value() returns a Variable bound in imperative.cc and get_tensor() is bound in pybind.cc;
        # if self is EagerTensor, value() returns self as defined in this file and get_tensor() is defined in eager_method.cc.
        # This interface behavior will be unified in the future.
        self.value().get_tensor().set(value_np,
                                      framework._current_expected_place())
......
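
To unpack the NOTE above: the same Python line reaches different C++ bindings depending on the tensor type, and for an EagerTensor it ends up writing the same underlying tensor as the new _set_value method. A schematic illustration only (not code from the repository; it strings together the calls named in the NOTE and in the tests below):

import numpy as np
from paddle.fluid import core, framework
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    t = core.eager.EagerTensor(value=np.zeros([2, 2], dtype='float32'))
    new_np = np.ones([2, 2], dtype='float32')

    # Path taken by the patched set_value() above: for an EagerTensor,
    # value() returns self and get_tensor() comes from eager_method.cc.
    t.value().get_tensor().set(new_np, framework._current_expected_place())

    # Direct use of the binding registered as "_set_value" in eager_method.cc.
    t._set_value(new_np)

    assert np.array_equal(t.numpy(), new_np)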
@@ -763,6 +763,24 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
            paddle.fluid.framework._current_expected_place())
        self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized())

    def test_set_value(self):
        with _test_eager_guard():
            ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
            egr_tensor = core.eager.EagerTensor(value=ori_arr)
            self.assertEqual(egr_tensor.stop_gradient, True)
            self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
            self.assertTrue(np.array_equal(egr_tensor.numpy(), ori_arr))

            ori_place = egr_tensor.place
            new_arr = np.random.rand(4, 4, 16, 32).astype('float32')
            self.assertFalse(np.array_equal(egr_tensor.numpy(), new_arr))

            egr_tensor._set_value(new_arr)
            self.assertEqual(egr_tensor.stop_gradient, True)
            self.assertTrue(egr_tensor.place._equals(ori_place))
            self.assertEqual(egr_tensor.shape, [4, 4, 16, 32])
            self.assertTrue(np.array_equal(egr_tensor.numpy(), new_arr))


class EagerParamBaseUsageTestCase(unittest.TestCase):
    def test_print(self):
@@ -856,6 +874,17 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
        egr_tensor12.backward()
        self.assertTrue(np.array_equal(egr_tensor12.gradient(), arr))

    def test_set_value(self):
        with _test_eager_guard():
            linear = paddle.nn.Linear(1, 3)
            ori_place = linear.weight.place
            new_weight = np.ones([1, 3]).astype('float32')
            self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight))

            linear.weight._set_value(new_weight)
            self.assertTrue(np.array_equal(linear.weight.numpy(), new_weight))
            self.assertTrue(linear.weight.place._equals(ori_place))


class EagerGuardTestCase(unittest.TestCase):
    def test__test_eager_guard(self):
......
@@ -21,6 +21,7 @@ import paddle.nn as nn
import paddle.fluid as fluid
import numpy as np

from paddle.fluid.framework import _test_eager_guard


class LeNetDygraph(fluid.dygraph.Layer):
@@ -70,7 +71,7 @@ def init_weights(layer):
class TestLayerApply(unittest.TestCase):
-    def test_apply_init_weight(self):
+    def func_apply_init_weight(self):
        with fluid.dygraph.guard():
            net = LeNetDygraph()
@@ -84,6 +85,11 @@ class TestLayerApply(unittest.TestCase):
                np.testing.assert_allclose(layer.weight.numpy(), 0.7)
                np.testing.assert_allclose(layer.bias.numpy(), -0.2)

    def test_apply_init_weight(self):
        with _test_eager_guard():
            self.func_apply_init_weight()
        self.func_apply_init_weight()


if __name__ == '__main__':
    unittest.main()
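
The change to TestLayerApply above is the pattern applied throughout these eager-mode tests: the original test body moves into a func_* method, and the test_* wrapper runs it twice, once under _test_eager_guard() (eager mode) and once without it (legacy dygraph). A minimal sketch of the pattern, using placeholder names (MyCaseTest, func_case):

import unittest
from paddle.fluid.framework import _test_eager_guard

class MyCaseTest(unittest.TestCase):      # placeholder test class
    def func_case(self):                  # the original test body lives here
        self.assertTrue(True)             # ... real assertions ...

    def test_case(self):
        with _test_eager_guard():         # first pass: eager mode
            self.func_case()
        self.func_case()                  # second pass: legacy dygraph mode

if __name__ == '__main__':
    unittest.main()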