diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index 07644bfa195d26ab44fde07931b7599e43a0e084..12c14ebf5e5ae91a56acd60f5d404b1dc9ffddf4 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -1003,13 +1003,6 @@ static std::string GenerateGradNodeCreationContent(
       grad_node_creation_str +=
           paddle::string::Sprintf(ADD_EDGES_TEMPLATE, input_autograd_name,
                                   input_autograd_name, input_position);
-
-      VLOG(6) << "Generated Call RetainGradForTensor";
-      const char* RETAIN_GRAD_TEMPLATE =
-          "  egr::EagerUtils::CheckAndRetainGrad(%s);\n";
-      grad_node_creation_str +=
-          paddle::string::Sprintf(RETAIN_GRAD_TEMPLATE, input_name);
-
     } else {
       compute_require_grad_args += ", &" + input_autograd_name;
       size_t input_position = fwd_inputs_name_pos_map.at(input_name);
@@ -1023,6 +1016,11 @@ static std::string GenerateGradNodeCreationContent(
       grad_node_creation_str += paddle::string::Sprintf(
           ADD_EDGES_TEMPLATE, input_autograd_name, input_position);
     }
+    VLOG(6) << "Generated Call RetainGradForTensor";
+    const char* RETAIN_GRAD_TEMPLATE =
+        "  egr::EagerUtils::CheckAndRetainGrad(%s);\n";
+    grad_node_creation_str +=
+        paddle::string::Sprintf(RETAIN_GRAD_TEMPLATE, input_name);
   }
 
   // [GradOpNode] SetGradInMeta
diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc
index 9b69ccca5a2918f61342c8767ea63f942b1881c9..34ab707fe2910e4da84364c2a4339afe0fdd96d6 100644
--- a/paddle/fluid/pybind/eager.cc
+++ b/paddle/fluid/pybind/eager.cc
@@ -42,6 +42,7 @@ namespace py = ::pybind11;
 
 PyTypeObject* p_eager_tensor_type;
 extern PyTypeObject* g_vartype_pytype;
+extern PyTypeObject* g_framework_tensor_pytype;
 
 PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
   PyObject* obj = type->tp_alloc(type, 0);
@@ -154,6 +155,37 @@ void InitEagerTensorWithEagerTensor(EagerTensorObject* self,
   }
 }
 
+void InitEagerTensorWithFrameworkTensor(EagerTensorObject* self,
+                                        const framework::Tensor& src,
+                                        const paddle::platform::Place& place,
+                                        const std::string& name) {
+  self->eager_tensor.set_name(name);
+  if (place == src.place()) {
+    std::shared_ptr<pten::DenseTensor> dense_tensor =
+        std::make_shared<pten::DenseTensor>(
+            pten::make_intrusive<paddle::experimental::SharedStorage>(place),
+            pten::DenseTensorMeta(pten::TransToPtenDataType(src.type()),
+                                  src.dims()));
+    paddle::experimental::ReMakePtenDenseTensor(src, dense_tensor.get());
+    self->eager_tensor.set_impl(dense_tensor);
+    VLOG(4) << "Same place, do ShareDataWith";
+  } else {
+    std::shared_ptr<pten::DenseTensor> dense_tensor =
+        std::make_shared<pten::DenseTensor>(
+            pten::make_intrusive<paddle::experimental::SharedStorage>(
+                src.place()),
+            pten::DenseTensorMeta(pten::TransToPtenDataType(src.type()),
+                                  src.dims()));
+    paddle::experimental::ReMakePtenDenseTensor(src, dense_tensor.get());
+    auto temp = egr::EagerTensor(dense_tensor);
+    self->eager_tensor.set_impl(
+        temp.copy_to(pten::TransToPtenBackend(place), true).impl());
+    VLOG(4) << "Different place, do TensorCopy";
+  }
+  egr::EagerUtils::autograd_meta(&(self->eager_tensor))->SetStopGradient(true);
+  egr::EagerUtils::unsafe_autograd_meta(self->eager_tensor)
+      ->SetPersistable(false);
+}
 // TODO(jiabin): We have to do some ugly work, refactor this method using
 // PyArg_ParseTuple(),PyArg_ParseTupleAndKeywords() and PyArg_Parse() later to
 // support kwargs.
@@ -187,6 +219,11 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
   * ** tensor: EagerTensor,
   * ** place: paddle::platform::Place,
   * ** name: std::string)
+  * 7. (multi-place) (must have first 2 parameters)
+  * def __init__ (
+  * ** tensor: FrameworkTensor,
+  * ** place: paddle::platform::Place,
+  * ** name: std::string)
   * **/
  PADDLE_ENFORCE_NOT_NULL(
      self, paddle::platform::errors::Fatal(
@@ -236,6 +273,15 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
           egr::Controller::Instance().GetExpectedPlace(),
           egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
       return 0;
+    } else if (PyObject_IsInstance(
+                   arg0_ptr,
+                   reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
+      VLOG(6) << "Calling case7's initializer.";
+      auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
+      InitEagerTensorWithFrameworkTensor(
+          py_tensor_ptr, src_tensor, src_tensor.place(),
+          egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
+      return 0;
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
           "We only support construct tensor from numpy value or tensor with "
@@ -275,6 +321,17 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
           py_tensor_ptr, src_tensor, place,
           egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
       return 0;
+    } else if (PyObject_IsInstance(
+                   arg0_ptr,
+                   reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
+      VLOG(6) << "Calling case7's initializer.";
+      auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
+      paddle::platform::Place place =
+          CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
+      InitEagerTensorWithFrameworkTensor(
+          py_tensor_ptr, src_tensor, place,
+          egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
+      return 0;
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
           "We only support construct tensor from numpy value or tensor with "
@@ -282,6 +339,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
           "please check your input first and make sure you are on the right "
           "way."));
     }
+    return 0;
   }
   case (Py_ssize_t)3: {
     PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
@@ -321,6 +379,24 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
       InitEagerTensorWithEagerTensor(py_tensor_ptr, src_tensor, place,
                                      act_name);
       return 0;
+    } else if (PyObject_IsInstance(
+                   arg0_ptr,
+                   reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
+      VLOG(6) << "Calling case7's initializer.";
+      auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
+      paddle::platform::Place place =
+          CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
+      std::string act_name = "";
+      PyObject* name_obj = PyTuple_GET_ITEM(args, 2);
+      if (name_obj == Py_None) {
+        act_name = egr::Controller::Instance().GenerateUniqueName(
+            "generated_tensor");
+      } else {
+        act_name = CastPyArg2AttrString(name_obj, 2);
+      }
+      InitEagerTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place,
+                                         act_name);
+      return 0;
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
           "We only support construct tensor from numpy value or tensor with "
@@ -328,6 +404,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
           "please check your input first and make sure you are on the right "
           "way."));
     }
+    return 0;
   }
   case (Py_ssize_t)4: {
     VLOG(6) << "Calling case3's initializer.";
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index e0e23b5a49f6ed3a630fb9b1c297731532333fdd..f5a48a279743293835ddfb993f4258eca4dff9d1 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -140,13 +140,15 @@ static PyObject* eager_tensor_method_copy_(EagerTensorObject* self,
 static PyObject* eager_tensor_retain_grads(EagerTensorObject* self,
                                            PyObject* args, PyObject* kwargs) {
   EAGER_TRY
-  auto meta = egr::EagerUtils::autograd_meta(&(self->eager_tensor));
-  if (!meta->GetMutableGradNode()) {
-    VLOG(6) << "Make grad node of tensor: " << self->eager_tensor.name()
-            << "become accumulation node";
-    meta->SetGradNode(std::make_shared<egr::GradNodeAccumulation>());
+  if (egr::Controller::Instance().HasGrad()) {
+    auto meta = egr::EagerUtils::autograd_meta(&(self->eager_tensor));
+    if (!meta->GetMutableGradNode()) {
+      VLOG(6) << "Make grad node of tensor: " << self->eager_tensor.name()
+              << "become accumulation node";
+      meta->SetGradNode(std::make_shared<egr::GradNodeAccumulation>());
+    }
+    egr::egr_utils_api::RetainGradForTensor(self->eager_tensor);
   }
-  egr::egr_utils_api::RetainGradForTensor(self->eager_tensor);
   Py_INCREF(Py_None);
   return Py_None;
   EAGER_CATCH_AND_THROW_RETURN_NULL
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index ba328692dd2aca56e1b2176b4cb7909102f7e9b8..879ea2b5d264e2c3727293749583e21ba580bfc9 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -40,6 +40,7 @@ extern PyTypeObject* g_cpuplace_pytype;
 extern PyTypeObject* g_xpuplace_pytype;
 extern PyTypeObject* g_npuplace_pytype;
 extern PyTypeObject* g_cudapinnedplace_pytype;
+extern PyTypeObject* g_framework_tensor_pytype;
 
 int TensorDtype2NumpyDtype(pten::DataType dtype) {
   switch (dtype) {
@@ -300,6 +301,18 @@ platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) {
   return place;
 }
 
+framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos) {
+  if (PyObject_IsInstance(
+          obj, reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
+    return ::pybind11::handle(obj).cast<framework::Tensor>();
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "Tensor, but got %s",
+        arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
+  }
+}
+
 paddle::framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
                                                             ssize_t arg_pos) {
   paddle::framework::proto::VarType::Type dtype;
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 7b7a88b5ac45e2c87b8f4bdc62b0b6e49a45a5e6..20c82c572c325da39bd6f0108d4eef7de410d8b3 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -35,6 +35,7 @@ egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos);
 std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
                                                             ssize_t arg_pos);
 platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos);
+framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos);
 std::vector<int> CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos);
 framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
                                                     ssize_t arg_pos);
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index 900b49ea7e6423ffe21cf128587f33265147a3a8..00e97bc2db4205c36f66421f43c59606093cf827 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -763,7 +763,6 @@ void BindImperative(py::module *m_ptr) {
                []() { imperative::SetLoadProcessSignalHandler(); });
   m.def("_throw_error_if_process_failed",
         []() { imperative::ThrowErrorIfLoadProcessFailed(); });
-
   // Dygraph DataLoader reader process & thread related functions
   m.def(
       "_convert_to_tensor_list",
@@ -866,7 +865,10 @@ void BindImperative(py::module *m_ptr) {
   m.def("start_imperative_gperf_profiler",
         []() { imperative::StartProfile(); });
-
+  m.def("_set_eager_tracer",
+        [](const std::shared_ptr<imperative::Tracer> &tracer) {
+          egr::Controller::Instance().SetCurrentTracer(tracer);
+        });
   m.def("stop_imperative_gperf_profiler",
         []() { imperative::StopProfile(); });
 
   m.def("_is_dygraph_debug_enabled",
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index a15e26b848e63e9247d8afd86e34b1077059fc16..46a679b0c97a0fd724f3591573bf8d22a9220ea0 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -164,6 +164,7 @@ PyTypeObject *g_cpuplace_pytype = nullptr;
 PyTypeObject *g_xpuplace_pytype = nullptr;
 PyTypeObject *g_npuplace_pytype = nullptr;
 PyTypeObject *g_cudapinnedplace_pytype = nullptr;
+PyTypeObject *g_framework_tensor_pytype = nullptr;
 
 bool IsCompiledWithCUDA() {
 #if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
@@ -733,7 +734,11 @@ PYBIND11_MODULE(core_noavx, m) {
 
   BindImperative(&m);
 
-  py::class_<framework::Tensor>(m, "Tensor", py::buffer_protocol())
+  py::class_<framework::Tensor> framework_tensor(m, "Tensor",
+                                                 py::buffer_protocol());
+  g_framework_tensor_pytype =
+      reinterpret_cast<PyTypeObject *>(framework_tensor.ptr());
+  framework_tensor
       .def("__array__",
            [](framework::Tensor &self) { return TensorToPyArray(self); })
       .def("_is_initialized",
diff --git a/paddle/pten/api/lib/utils/tensor_utils.cc b/paddle/pten/api/lib/utils/tensor_utils.cc
index b248cd209899b330088cedff380980334a7fbb28..4ce63c7b821b48e8c7afaf28e995ee9f08e83eef 100644
--- a/paddle/pten/api/lib/utils/tensor_utils.cc
+++ b/paddle/pten/api/lib/utils/tensor_utils.cc
@@ -411,6 +411,28 @@ void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
   }
 }
 
+void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
+                           pten::DenseTensor* dst) {
+  auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
+  meta->dims = src.dims();
+  // Since the type of DenseTensorMeta is const, const_cast must be used
+  const_cast<DataType&>(meta->dtype) = pten::TransToPtenDataType(src.type());
+  // Since the type of DenseTensorMeta is const, const_cast must be used
+  const_cast<DataLayout&>(meta->layout) =
+      pten::TransToPtenDataLayout(src.layout());
+
+  auto* shared_storage = static_cast<SharedStorage*>(
+      pten::CompatibleDenseTensorUtils::UnsafeGetMutableStorage(dst));
+  PADDLE_ENFORCE_NOT_NULL(
+      shared_storage,
+      platform::errors::NotFound(
+          "Target DenseTensor's shared storage is nullptr."));
+
+  if (src.IsInitialized()) {
+    shared_storage->ResetAllocation(src.Holder(), src.offset());
+  }
+}
+
 void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
                            const pten::TensorArgDef& arg_def,
                            pten::DenseTensor* dst) {
diff --git a/paddle/pten/api/lib/utils/tensor_utils.h b/paddle/pten/api/lib/utils/tensor_utils.h
index 32b7c377ebfdef1f7b7733ae2a32f70f2a33721a..838a63e1a8d5ffefd0ee5b68512b622c23980876 100644
--- a/paddle/pten/api/lib/utils/tensor_utils.h
+++ b/paddle/pten/api/lib/utils/tensor_utils.h
@@ -75,6 +75,9 @@ void MovesSharedStorage(pten::DenseTensor* src,
 void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
                            pten::DenseTensor* dst);
 
+void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
+                           pten::DenseTensor* dst);
+
 void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
                            const pten::TensorArgDef& arg_def,
                            pten::DenseTensor* dst);
diff --git a/python/paddle/fluid/core.py b/python/paddle/fluid/core.py
index a9cfe0babec0f63dd8d85a695c26ecc8f1509f54..9b99e17e9e51c062477d0832fe07fe966a44cbc6 100644
--- a/python/paddle/fluid/core.py
+++ b/python/paddle/fluid/core.py
@@ -268,6 +268,7 @@ if avx_supported():
         from .core_avx import _is_dygraph_debug_enabled
         from .core_avx import _dygraph_debug_level
         from .core_avx import _switch_tracer
+        from .core_avx import _set_eager_tracer
         from .core_avx import _disable_eager_mode
         from .core_avx import _enable_eager_mode
         from .core_avx import _in_eager_mode
@@ -324,6 +325,7 @@ if load_noavx:
         from .core_noavx import _is_dygraph_debug_enabled
         from .core_noavx import _dygraph_debug_level
         from .core_noavx import _switch_tracer
+        from .core_noavx import _set_eager_tracer
         from .core_noavx import _disable_eager_mode
         from .core_noavx import _enable_eager_mode
         from .core_noavx import _in_eager_mode
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index cf148257c5f088964fdb115603bea8cc9922b8b0..fd2a93874876ca852c2ad88f4e6ebb8e48892bcb 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -81,10 +81,13 @@ core._disable_eager_mode()
 
 
 @signature_safe_contextmanager
-def _test_eager_guard():
+def _test_eager_guard(tracer=None):
     core._enable_eager_mode()
     _C_ops.switch_to_eager_ops()
-    core._switch_tracer(_dygraph_tracer_)
+    if tracer is None:
+        core._set_eager_tracer(_dygraph_tracer_)
+    else:
+        core._set_eager_tracer(tracer)
     try:
         yield
     finally:
diff --git a/python/paddle/fluid/tests/unittests/test_egr_python_api.py b/python/paddle/fluid/tests/unittests/test_egr_python_api.py
index 803631a4d2cda382baa9b9125907ddd535f00ab0..64c563ce721512541a21965722d78bc80f949676 100644
--- a/python/paddle/fluid/tests/unittests/test_egr_python_api.py
+++ b/python/paddle/fluid/tests/unittests/test_egr_python_api.py
@@ -16,7 +16,7 @@ import paddle.fluid.core as core
 import paddle.fluid.eager.eager_tensor_patch_methods as eager_tensor_patch_methods
 import paddle
 import numpy as np
-from paddle.fluid.framework import _test_eager_guard, EagerParamBase
+from paddle.fluid.framework import _test_eager_guard, EagerParamBase, _in_eager_mode
 from paddle.fluid.data_feeder import convert_dtype
 import unittest
 import copy
@@ -221,6 +221,36 @@ class EagerTensorPropertiesTestCase(unittest.TestCase):
         self.assertTrue(egr_tensor9.place._equals(place))
         self.assertTrue(np.array_equal(egr_tensor9.numpy(), arr4))
 
+        x = np.random.rand(3, 3).astype('float32')
+        t = paddle.fluid.Tensor()
+        t.set(x, paddle.fluid.CPUPlace())
+        egr_tensor10 = core.eager.EagerTensor(t, place)
+        self.assertEqual(egr_tensor10.persistable, False)
+        self.assertTrue("generated_tensor" in egr_tensor10.name)
+        self.assertEqual(egr_tensor10.shape, [3, 3])
+        self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
+        self.assertEqual(egr_tensor10.stop_gradient, True)
+        self.assertTrue(egr_tensor10.place._equals(place))
+        self.assertTrue(np.array_equal(egr_tensor10.numpy(), x))
+
+        egr_tensor11 = core.eager.EagerTensor(t, place, "framework_constructed")
+        self.assertEqual(egr_tensor11.persistable, False)
+        self.assertTrue("framework_constructed" in egr_tensor11.name)
+        self.assertEqual(egr_tensor11.shape, [3, 3])
+        self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
+        self.assertEqual(egr_tensor11.stop_gradient, True)
+        self.assertTrue(egr_tensor11.place._equals(place))
+        self.assertTrue(np.array_equal(egr_tensor11.numpy(), x))
+
+        egr_tensor12 = core.eager.EagerTensor(t)
+        self.assertEqual(egr_tensor12.persistable, False)
+        self.assertTrue("generated_tensor" in egr_tensor12.name)
+        self.assertEqual(egr_tensor12.shape, [3, 3])
+        self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
+        self.assertEqual(egr_tensor12.stop_gradient, True)
+        self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
+        self.assertTrue(np.array_equal(egr_tensor12.numpy(), x))
+
         with self.assertRaisesRegexp(
                 ValueError, "The shape of Parameter should not be None"):
             eager_param = EagerParamBase(shape=None, dtype="float32")
@@ -423,5 +453,12 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
         self.assertTrue(np.array_equal(res3, res4))
 
 
+class EagerGuardTestCase(unittest.TestCase):
+    def test__test_eager_guard(self):
+        tracer = paddle.fluid.dygraph.tracer.Tracer()
+        with _test_eager_guard(tracer):
+            self.assertTrue(_in_eager_mode())
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py
index c8836cd7767e8295a8d52f771a3fc6c99a017056..d523e746b939c44d53b1586e6338eb85397d4876 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py
@@ -24,7 +24,7 @@ from test_imperative_base import new_program_scope
 import paddle.fluid.dygraph_utils as dygraph_utils
 from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper
 import paddle
-from paddle.fluid.framework import _test_eager_guard
+from paddle.fluid.framework import _test_eager_guard, _in_eager_mode
 
 
 class MyLayer(fluid.Layer):
@@ -259,12 +259,28 @@ class TestImperative(unittest.TestCase):
             self.func_isinstance()
         self.func_isinstance()
 
-    def test_create_VarBase(self):
+    def func_create_varbase(self):
         x = np.ones([2, 2], np.float32)
         y = np.zeros([3, 3], np.float32)
         t = fluid.Tensor()
         t.set(x, fluid.CPUPlace())
-        with fluid.dygraph.guard():
+        if _in_eager_mode():
+            # TODO(jiabin): Support Kwargs and uncomment these tests
+            # egr_tmp = fluid.core.eager.EagerTensor(value=x, place=fluid.core.CPUPlace())
+            egr_tmp2 = fluid.core.eager.EagerTensor(y, fluid.core.CPUPlace())
+            egr_tmp3 = paddle.to_tensor(x)
+            egr_tmp4 = fluid.core.eager.EagerTensor(y)
+            # egr_tmp5 = fluid.core.eager.EagerTensor(value=x)
+            # TODO(jiabin): Support it when we merge LoDTensor with DenseTensor
+            egr_tmp6 = fluid.core.eager.EagerTensor(t)
+
+            # self.assertTrue(np.array_equal(x, egr_tmp.numpy()))
+            self.assertTrue(np.array_equal(y, egr_tmp2.numpy()))
+            self.assertTrue(np.array_equal(x, egr_tmp3.numpy()))
+            self.assertTrue(np.array_equal(y, egr_tmp4.numpy()))
+            # self.assertTrue(np.array_equal(x, egr_tmp5.numpy()))
+            self.assertTrue(np.array_equal(x, egr_tmp6.numpy()))
+        else:
             tmp = fluid.core.VarBase(value=x, place=fluid.core.CPUPlace())
             tmp2 = fluid.core.VarBase(y, fluid.core.CPUPlace())
             tmp3 = paddle.to_tensor(x)
@@ -279,6 +295,12 @@ class TestImperative(unittest.TestCase):
             self.assertTrue(np.array_equal(x, tmp5.numpy()))
             self.assertTrue(np.array_equal(x, tmp6.numpy()))
 
+    def test_create_varbase(self):
+        with fluid.dygraph.guard():
+            with _test_eager_guard():
+                self.func_create_varbase()
+            self.func_create_varbase()
+
     def test_no_grad_guard(self):
         data = np.array([[2, 3], [4, 5]]).astype('float32')
         with fluid.dygraph.guard():
@@ -758,7 +780,7 @@ class TestImperative(unittest.TestCase):
         self.assertTrue(np.allclose(dy_grad_h2h2, static_grad_h2h))
         self.assertTrue(np.allclose(dy_grad_i2h2, static_grad_i2h))
 
-    def test_layer_attrs(self):
+    def func_layer_attrs(self):
         layer = fluid.dygraph.Layer("test")
         layer.test_attr = 1
         self.assertFalse(hasattr(layer, "whatever"))
@@ -778,6 +800,11 @@ class TestImperative(unittest.TestCase):
         my_layer.l1 = None
         self.assertEqual(len(my_layer.sublayers()), 0)
 
+    def test_layer_attrs(self):
+        with _test_eager_guard():
+            self.func_layer_attrs()
+        self.func_layer_attrs()
+
 
 class TestDygraphUtils(unittest.TestCase):
     def func_append_activation_in_dygraph_exception(self):
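
A minimal usage sketch of the two user-visible changes in this patch, mirroring the unit tests added above (assumes a Paddle build at this revision with eager mode compiled in; the values and the "framework_constructed" name are taken from the new tests):

    import numpy as np
    import paddle
    import paddle.fluid.core as core
    from paddle.fluid.framework import _test_eager_guard

    # A framework::Tensor exposed to Python as paddle.fluid.Tensor.
    x = np.random.rand(3, 3).astype('float32')
    t = paddle.fluid.Tensor()
    t.set(x, paddle.fluid.CPUPlace())

    # _test_eager_guard now accepts an explicit tracer, which it installs
    # through the new core._set_eager_tracer binding.
    tracer = paddle.fluid.dygraph.tracer.Tracer()
    with _test_eager_guard(tracer):
        # Constructor case 7: build an eager EagerTensor directly from a
        # framework Tensor, optionally passing a place and a name.
        egr_tensor = core.eager.EagerTensor(t, paddle.fluid.CPUPlace(),
                                            "framework_constructed")
        assert np.array_equal(egr_tensor.numpy(), x)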