From 1fb80a6a20d3e28b9b3318474c1eadf1d2d77859 Mon Sep 17 00:00:00 2001
From: Jiabin Yang
Date: Tue, 28 Dec 2021 19:04:42 +0800
Subject: [PATCH] Support test basic of Var and Layer (#38426)

* Rearranged Eager AutoCodeGen directory structure
* Removed USE_OP in Eager AutoCodeGen
* Enabled generation for Operators without Grad/Inputs/Outputs
* Resolved operators without input
* Fixed merge conflicts
* Enabled Eager AutoCodeGen for 10+ more operators
* Refactored Eager AutoCodeGen with more organized helper objects
* Enabled Eager AutoCodeGen for operators with multiple OpBases
* Adjusted Eager AutoCodeGen to Enable Passing Output Tensor as Input Argument
* Handled Dispensable Inputs/Outputs in Eager AutoCodeGen
* Adjusted function generation/call between Python-C API & Dygraph API
* Synchronized auto-generated Python-C API with Dygraph Forward Functions
* support more eager tensor api
* fix merge compile error
* fix compile error and fit develop code
* support pure CPU
* fix some logic error in eager_mode
* support _varbase_creator in eager mode
* Added safe_initialized interface to EagerTensor for use in processing dispensable inputs
* for eager mode
* refine
* support multiple constructor for eager tensor
* add place related code
* polish code
* specific randint with dtype of int64
* Support pure cpu test
* eager logic
* refine test in pure cpu
* eager logic
* eager logic
* eager logic, test=develop
* skip core.eager when in inference, test=develop
* refine, test=develop
* refine, test=develop
* call RetainGrad after run forward kernel, test=develop
* refine, test=develop
* support dygraph util, meta, guard test
* support inference test
* refine test and fix initializer failed
* support create varbase and fix retain grad error
* fix windows error
* support test code coverage
* support test code coverage
* support test code coverage

Co-authored-by: jim19930609
Co-authored-by: Wang Huan
---
 .../auto_code_generator/eager_generator.cc    | 12 ++-
 paddle/fluid/pybind/eager.cc                  | 77 +++++++++++++++++++
 paddle/fluid/pybind/eager_method.cc           | 14 ++--
 paddle/fluid/pybind/eager_utils.cc            | 13 ++++
 paddle/fluid/pybind/eager_utils.h             |  1 +
 paddle/fluid/pybind/imperative.cc             |  6 +-
 paddle/fluid/pybind/pybind.cc                 |  7 +-
 paddle/pten/api/lib/utils/tensor_utils.cc     | 22 ++++++
 paddle/pten/api/lib/utils/tensor_utils.h      |  3 +
 python/paddle/fluid/core.py                   |  2 +
 python/paddle/fluid/framework.py              |  7 +-
 .../tests/unittests/test_egr_python_api.py    | 39 +++++++++-
 .../tests/unittests/test_imperative_basic.py  | 35 ++++++++-
 13 files changed, 215 insertions(+), 23 deletions(-)
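Notes (usage sketch, kept outside the diff as patch notes): the new case-7 initializer below constructs an EagerTensor directly from a framework Tensor, and _test_eager_guard gains an optional tracer argument. A minimal example distilled from the updated unit tests; the shape and values are illustrative only:

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.framework import _test_eager_guard

    x = np.ones([3, 3], np.float32)
    t = fluid.Tensor()          # framework::Tensor exposed to Python
    t.set(x, fluid.CPUPlace())

    with _test_eager_guard():   # run the body in eager mode
        eager_t = fluid.core.eager.EagerTensor(t)  # new case-7 initializer
        assert np.array_equal(eager_t.numpy(), x)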
diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index 07644bfa195..12c14ebf5e5 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -1003,13 +1003,6 @@ static std::string GenerateGradNodeCreationContent(
       grad_node_creation_str +=
           paddle::string::Sprintf(ADD_EDGES_TEMPLATE, input_autograd_name,
                                   input_autograd_name, input_position);
-
-      VLOG(6) << "Generated Call RetainGradForTensor";
-      const char* RETAIN_GRAD_TEMPLATE =
-          "  egr::EagerUtils::CheckAndRetainGrad(%s);\n";
-      grad_node_creation_str +=
-          paddle::string::Sprintf(RETAIN_GRAD_TEMPLATE, input_name);
-
     } else {
       compute_require_grad_args += ", &" + input_autograd_name;
       size_t input_position = fwd_inputs_name_pos_map.at(input_name);
@@ -1023,6 +1016,11 @@ static std::string GenerateGradNodeCreationContent(
       grad_node_creation_str += paddle::string::Sprintf(
           ADD_EDGES_TEMPLATE, input_autograd_name, input_position);
     }
+    VLOG(6) << "Generated Call RetainGradForTensor";
+    const char* RETAIN_GRAD_TEMPLATE =
+        "  egr::EagerUtils::CheckAndRetainGrad(%s);\n";
+    grad_node_creation_str +=
+        paddle::string::Sprintf(RETAIN_GRAD_TEMPLATE, input_name);
   }
 
   // [GradOpNode] SetGradInMeta
diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc
index 9b69ccca5a2..34ab707fe29 100644
--- a/paddle/fluid/pybind/eager.cc
+++ b/paddle/fluid/pybind/eager.cc
@@ -42,6 +42,7 @@ namespace py = ::pybind11;
 
 PyTypeObject* p_eager_tensor_type;
 extern PyTypeObject* g_vartype_pytype;
+extern PyTypeObject* g_framework_tensor_pytype;
 
 PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
   PyObject* obj = type->tp_alloc(type, 0);
@@ -154,6 +155,37 @@ void InitEagerTensorWithEagerTensor(EagerTensorObject* self,
   }
 }
 
+void InitEagerTensorWithFrameworkTensor(EagerTensorObject* self,
+                                        const framework::Tensor& src,
+                                        const paddle::platform::Place& place,
+                                        const std::string& name) {
+  self->eager_tensor.set_name(name);
+  if (place == src.place()) {
+    std::shared_ptr<pten::DenseTensor> dense_tensor =
+        std::make_shared<pten::DenseTensor>(
+            pten::make_intrusive<paddle::experimental::SharedStorage>(place),
+            pten::DenseTensorMeta(pten::TransToPtenDataType(src.type()),
+                                  src.dims()));
+    paddle::experimental::ReMakePtenDenseTensor(src, dense_tensor.get());
+    self->eager_tensor.set_impl(dense_tensor);
+    VLOG(4) << "Same place, do ShareDataWith";
+  } else {
+    std::shared_ptr<pten::DenseTensor> dense_tensor =
+        std::make_shared<pten::DenseTensor>(
+            pten::make_intrusive<paddle::experimental::SharedStorage>(
+                src.place()),
+            pten::DenseTensorMeta(pten::TransToPtenDataType(src.type()),
+                                  src.dims()));
+    paddle::experimental::ReMakePtenDenseTensor(src, dense_tensor.get());
+    auto temp = egr::EagerTensor(dense_tensor);
+    self->eager_tensor.set_impl(
+        temp.copy_to(pten::TransToPtenBackend(place), true).impl());
+    VLOG(4) << "Different place, do TensorCopy";
+  }
+  egr::EagerUtils::autograd_meta(&(self->eager_tensor))->SetStopGradient(true);
+  egr::EagerUtils::unsafe_autograd_meta(self->eager_tensor)
+      ->SetPersistable(false);
+}
+
 // TODO(jiabin): We have to do some ugly work, refactor this method using
 // PyArg_ParseTuple(),PyArg_ParseTupleAndKeywords() and PyArg_Parse() later to
 // support kwargs.
@@ -187,6 +219,11 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
  * ** tensor: EagerTensor,
  * ** place: paddle::platform::Place,
  * ** name: std::string)
+ * 7. (multi-place) (must have the first 2 parameters)
+ * def __init__ (
+ * ** tensor: FrameworkTensor,
+ * ** place: paddle::platform::Place,
+ * ** name: std::string)
  * **/
   PADDLE_ENFORCE_NOT_NULL(
       self, paddle::platform::errors::Fatal(
@@ -236,6 +273,15 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
             egr::Controller::Instance().GetExpectedPlace(),
             egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
         return 0;
+      } else if (PyObject_IsInstance(
+                     arg0_ptr,
+                     reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
+        VLOG(6) << "Calling case7's initializer.";
+        auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
+        InitEagerTensorWithFrameworkTensor(
+            py_tensor_ptr, src_tensor, src_tensor.place(),
+            egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
+        return 0;
       } else {
         PADDLE_THROW(platform::errors::InvalidArgument(
             "We only support construct tensor from numpy value or tensor with "
@@ -275,6 +321,17 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
             py_tensor_ptr, src_tensor, place,
             egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
         return 0;
+      } else if (PyObject_IsInstance(
+                     arg0_ptr,
+                     reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
+        VLOG(6) << "Calling case7's initializer.";
+        auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
+        paddle::platform::Place place =
+            CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
+        InitEagerTensorWithFrameworkTensor(
+            py_tensor_ptr, src_tensor, place,
+            egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
+        return 0;
       } else {
         PADDLE_THROW(platform::errors::InvalidArgument(
             "We only support construct tensor from numpy value or tensor with "
@@ -281,6 +338,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
             "please check your input first and make sure you are on the right "
            "way."));
       }
+      return 0;
     }
     case (Py_ssize_t)3: {
       PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
@@ -321,6 +379,24 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
         InitEagerTensorWithEagerTensor(py_tensor_ptr, src_tensor, place,
                                        act_name);
         return 0;
+      } else if (PyObject_IsInstance(
+                     arg0_ptr,
+                     reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
+        VLOG(6) << "Calling case7's initializer.";
+        auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
+        paddle::platform::Place place =
+            CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
+        std::string act_name = "";
+        PyObject* name_obj = PyTuple_GET_ITEM(args, 2);
+        if (name_obj == Py_None) {
+          act_name = egr::Controller::Instance().GenerateUniqueName(
+              "generated_tensor");
+        } else {
+          act_name = CastPyArg2AttrString(name_obj, 2);
+        }
+        InitEagerTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place,
+                                           act_name);
+        return 0;
       } else {
         PADDLE_THROW(platform::errors::InvalidArgument(
             "We only support construct tensor from numpy value or tensor with "
@@ -327,6 +403,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
             "please check your input first and make sure you are on the right "
            "way."));
       }
+      return 0;
     }
     case (Py_ssize_t)4: {
       VLOG(6) << "Calling case3's initializer.";
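Note (call shapes, kept outside the diff): the case-7 branches above accept one, two, or three positional arguments, mirroring the unit tests added later in this patch; t is a paddle.fluid.Tensor and the default "generated_tensor" name prefix comes from GenerateUniqueName:

    a = fluid.core.eager.EagerTensor(t)         # place taken from t, name generated
    b = fluid.core.eager.EagerTensor(t, place)  # explicit place, name generated
    c = fluid.core.eager.EagerTensor(t, place, "framework_constructed")
    # third argument None falls back to a generated name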
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index e0e23b5a49f..f5a48a27974 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -140,13 +140,15 @@ static PyObject* eager_tensor_method_copy_(EagerTensorObject* self,
 static PyObject* eager_tensor_retain_grads(EagerTensorObject* self,
                                            PyObject* args, PyObject* kwargs) {
   EAGER_TRY
-  auto meta = egr::EagerUtils::autograd_meta(&(self->eager_tensor));
-  if (!meta->GetMutableGradNode()) {
-    VLOG(6) << "Make grad node of tensor: " << self->eager_tensor.name()
-            << "become accumulation node";
-    meta->SetGradNode(std::make_shared<egr::GradNodeAccumulation>());
+  if (egr::Controller::Instance().HasGrad()) {
+    auto meta = egr::EagerUtils::autograd_meta(&(self->eager_tensor));
+    if (!meta->GetMutableGradNode()) {
+      VLOG(6) << "Make grad node of tensor: " << self->eager_tensor.name()
+              << " become accumulation node";
+      meta->SetGradNode(std::make_shared<egr::GradNodeAccumulation>());
+    }
+    egr::egr_utils_api::RetainGradForTensor(self->eager_tensor);
   }
-  egr::egr_utils_api::RetainGradForTensor(self->eager_tensor);
   Py_INCREF(Py_None);
   return Py_None;
   EAGER_CATCH_AND_THROW_RETURN_NULL
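Note (behavioral sketch, kept outside the diff): retain_grads is now gated on Controller::Instance().HasGrad(), so it should be a no-op while gradient recording is globally off. A hypothetical illustration, assuming the no-grad scope toggles that flag in eager mode:

    with _test_eager_guard():
        x = paddle.to_tensor([1.0, 2.0])
        x.retain_grads()      # HasGrad() true: attaches a GradNodeAccumulation
        with paddle.no_grad():
            x.retain_grads()  # HasGrad() false: returns without side effects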
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index ba328692dd2..879ea2b5d26 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -40,6 +40,7 @@ extern PyTypeObject* g_cpuplace_pytype;
 extern PyTypeObject* g_xpuplace_pytype;
 extern PyTypeObject* g_npuplace_pytype;
 extern PyTypeObject* g_cudapinnedplace_pytype;
+extern PyTypeObject* g_framework_tensor_pytype;
 
 int TensorDtype2NumpyDtype(pten::DataType dtype) {
   switch (dtype) {
@@ -300,6 +301,18 @@ platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) {
   return place;
 }
 
+framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos) {
+  if (PyObject_IsInstance(
+          obj, reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
+    return ::pybind11::handle(obj).cast<framework::Tensor>();
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "Tensor, but got %s",
+        arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
+  }
+}
+
 paddle::framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
                                                             ssize_t arg_pos) {
   paddle::framework::proto::VarType::Type dtype;
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 7b7a88b5ac4..20c82c572c3 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -35,6 +35,7 @@ egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos);
 std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
                                                             ssize_t arg_pos);
 platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos);
+framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos);
 std::vector<int> CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos);
 framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
                                                     ssize_t arg_pos);
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index 900b49ea7e6..00e97bc2db4 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -763,7 +763,6 @@ void BindImperative(py::module *m_ptr) {
         []() { imperative::SetLoadProcessSignalHandler(); });
   m.def("_throw_error_if_process_failed",
         []() { imperative::ThrowErrorIfLoadProcessFailed(); });
-
   // Dygraph DataLoader reader process & thread related functions
   m.def(
       "_convert_to_tensor_list",
@@ -866,7 +865,10 @@ void BindImperative(py::module *m_ptr) {
 
   m.def("start_imperative_gperf_profiler",
         []() { imperative::StartProfile(); });
-
+  m.def("_set_eager_tracer",
+        [](const std::shared_ptr<imperative::Tracer> &tracer) {
+          egr::Controller::Instance().SetCurrentTracer(tracer);
+        });
   m.def("stop_imperative_gperf_profiler",
         []() { imperative::StopProfile(); });
 
   m.def("_is_dygraph_debug_enabled",
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index a15e26b848e..46a679b0c97 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -164,6 +164,7 @@ PyTypeObject *g_cpuplace_pytype = nullptr;
 PyTypeObject *g_xpuplace_pytype = nullptr;
 PyTypeObject *g_npuplace_pytype = nullptr;
 PyTypeObject *g_cudapinnedplace_pytype = nullptr;
+PyTypeObject *g_framework_tensor_pytype = nullptr;
 
 bool IsCompiledWithCUDA() {
 #if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
@@ -733,7 +734,11 @@ PYBIND11_MODULE(core_noavx, m) {
 
   BindImperative(&m);
 
-  py::class_<framework::Tensor>(m, "Tensor", py::buffer_protocol())
+  py::class_<framework::Tensor> framework_tensor(m, "Tensor",
+                                                 py::buffer_protocol());
+  g_framework_tensor_pytype =
+      reinterpret_cast<PyTypeObject *>(framework_tensor.ptr());
+  framework_tensor
       .def("__array__",
            [](framework::Tensor &self) { return TensorToPyArray(self); })
       .def("_is_initialized",
diff --git a/paddle/pten/api/lib/utils/tensor_utils.cc b/paddle/pten/api/lib/utils/tensor_utils.cc
index b248cd20989..4ce63c7b821 100644
--- a/paddle/pten/api/lib/utils/tensor_utils.cc
+++ b/paddle/pten/api/lib/utils/tensor_utils.cc
@@ -411,6 +411,28 @@ void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
   }
 }
 
+void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
+                           pten::DenseTensor* dst) {
+  auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
+  meta->dims = src.dims();
+  // Since the type of DenseTensorMeta is const, const_cast must be used
+  const_cast<pten::DataType&>(meta->dtype) =
+      pten::TransToPtenDataType(src.type());
+  // Since the type of DenseTensorMeta is const, const_cast must be used
+  const_cast<pten::DataLayout&>(meta->layout) =
+      pten::TransToPtenDataLayout(src.layout());
+
+  auto* shared_storage = static_cast<SharedStorage*>(
+      pten::CompatibleDenseTensorUtils::UnsafeGetMutableStorage(dst));
+  PADDLE_ENFORCE_NOT_NULL(
+      shared_storage,
+      platform::errors::NotFound(
+          "Target DenseTensor's shared storage is nullptr."));
+
+  if (src.IsInitialized()) {
+    shared_storage->ResetAllocation(src.Holder(), src.offset());
+  }
+}
+
 void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
                            const pten::TensorArgDef& arg_def,
                            pten::DenseTensor* dst) {
diff --git a/paddle/pten/api/lib/utils/tensor_utils.h b/paddle/pten/api/lib/utils/tensor_utils.h
index 32b7c377ebf..838a63e1a8d 100644
--- a/paddle/pten/api/lib/utils/tensor_utils.h
+++ b/paddle/pten/api/lib/utils/tensor_utils.h
@@ -75,6 +75,9 @@ void MovesSharedStorage(pten::DenseTensor* src,
 void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
                            pten::DenseTensor* dst);
 
+void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
+                           pten::DenseTensor* dst);
+
 void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
                            const pten::TensorArgDef& arg_def,
                            pten::DenseTensor* dst);
diff --git a/python/paddle/fluid/core.py b/python/paddle/fluid/core.py
index a9cfe0babec..9b99e17e9e5 100644
--- a/python/paddle/fluid/core.py
+++ b/python/paddle/fluid/core.py
@@ -268,6 +268,7 @@ if avx_supported():
        from .core_avx import _is_dygraph_debug_enabled
        from .core_avx import _dygraph_debug_level
        from .core_avx import _switch_tracer
+       from .core_avx import _set_eager_tracer
        from .core_avx import _disable_eager_mode
        from .core_avx import _enable_eager_mode
        from .core_avx import _in_eager_mode
@@ -324,6 +325,7 @@ if load_noavx:
        from .core_noavx import _is_dygraph_debug_enabled
        from .core_noavx import _dygraph_debug_level
        from .core_noavx import _switch_tracer
+       from .core_noavx import _set_eager_tracer
        from .core_noavx import _disable_eager_mode
        from .core_noavx import _enable_eager_mode
        from .core_noavx import _in_eager_mode
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index cf148257c5f..fd2a9387487 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -81,10 +81,13 @@ core._disable_eager_mode()
 
 
 @signature_safe_contextmanager
-def _test_eager_guard():
+def _test_eager_guard(tracer=None):
     core._enable_eager_mode()
     _C_ops.switch_to_eager_ops()
-    core._switch_tracer(_dygraph_tracer_)
+    if tracer is None:
+        core._set_eager_tracer(_dygraph_tracer_)
+    else:
+        core._set_eager_tracer(tracer)
     try:
         yield
     finally:
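Note (usage sketch, kept outside the diff): the new tracer parameter lets a test pin the eager controller to a specific tracer instead of the ambient _dygraph_tracer_, which is exactly what the new EagerGuardTestCase below exercises:

    import paddle
    from paddle.fluid.framework import _test_eager_guard, _in_eager_mode

    tracer = paddle.fluid.dygraph.tracer.Tracer()
    with _test_eager_guard(tracer):
        assert _in_eager_mode()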
diff --git a/python/paddle/fluid/tests/unittests/test_egr_python_api.py b/python/paddle/fluid/tests/unittests/test_egr_python_api.py
index 803631a4d2c..64c563ce721 100644
--- a/python/paddle/fluid/tests/unittests/test_egr_python_api.py
+++ b/python/paddle/fluid/tests/unittests/test_egr_python_api.py
@@ -16,7 +16,7 @@ import paddle.fluid.core as core
 import paddle.fluid.eager.eager_tensor_patch_methods as eager_tensor_patch_methods
 import paddle
 import numpy as np
-from paddle.fluid.framework import _test_eager_guard, EagerParamBase
+from paddle.fluid.framework import _test_eager_guard, EagerParamBase, _in_eager_mode
 from paddle.fluid.data_feeder import convert_dtype
 import unittest
 import copy
@@ -221,6 +221,36 @@ class EagerTensorPropertiesTestCase(unittest.TestCase):
         self.assertTrue(egr_tensor9.place._equals(place))
         self.assertTrue(np.array_equal(egr_tensor9.numpy(), arr4))
 
+        x = np.random.rand(3, 3).astype('float32')
+        t = paddle.fluid.Tensor()
+        t.set(x, paddle.fluid.CPUPlace())
+        egr_tensor10 = core.eager.EagerTensor(t, place)
+        self.assertEqual(egr_tensor10.persistable, False)
+        self.assertTrue("generated_tensor" in egr_tensor10.name)
+        self.assertEqual(egr_tensor10.shape, [3, 3])
+        self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
+        self.assertEqual(egr_tensor10.stop_gradient, True)
+        self.assertTrue(egr_tensor10.place._equals(place))
+        self.assertTrue(np.array_equal(egr_tensor10.numpy(), x))
+
+        egr_tensor11 = core.eager.EagerTensor(t, place, "framework_constructed")
+        self.assertEqual(egr_tensor11.persistable, False)
+        self.assertTrue("framework_constructed" in egr_tensor11.name)
+        self.assertEqual(egr_tensor11.shape, [3, 3])
+        self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
+        self.assertEqual(egr_tensor11.stop_gradient, True)
+        self.assertTrue(egr_tensor11.place._equals(place))
+        self.assertTrue(np.array_equal(egr_tensor11.numpy(), x))
+
+        egr_tensor12 = core.eager.EagerTensor(t)
+        self.assertEqual(egr_tensor12.persistable, False)
+        self.assertTrue("generated_tensor" in egr_tensor12.name)
+        self.assertEqual(egr_tensor12.shape, [3, 3])
+        self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
+        self.assertEqual(egr_tensor12.stop_gradient, True)
+        self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
+        self.assertTrue(np.array_equal(egr_tensor12.numpy(), x))
+
         with self.assertRaisesRegexp(
                 ValueError, "The shape of Parameter should not be None"):
             eager_param = EagerParamBase(shape=None, dtype="float32")
@@ -423,5 +453,12 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
         self.assertTrue(np.array_equal(res3, res4))
 
 
+class EagerGuardTestCase(unittest.TestCase):
+    def test__test_eager_guard(self):
+        tracer = paddle.fluid.dygraph.tracer.Tracer()
+        with _test_eager_guard(tracer):
+            self.assertTrue(_in_eager_mode())
+
+
 if __name__ == "__main__":
     unittest.main()
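Note (test pattern, kept outside the diff): the func_/test_ split used above and below runs each test body twice, once under the eager guard and once in legacy dygraph, which is how this patch reuses the existing assertions for eager-mode coverage:

    def test_create_varbase(self):
        with fluid.dygraph.guard():
            with _test_eager_guard():
                self.func_create_varbase()  # eager mode
            self.func_create_varbase()      # legacy dygraph mode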
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py
index c8836cd7767..d523e746b93 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py
@@ -24,7 +24,7 @@ from test_imperative_base import new_program_scope
 import paddle.fluid.dygraph_utils as dygraph_utils
 from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper
 import paddle
-from paddle.fluid.framework import _test_eager_guard
+from paddle.fluid.framework import _test_eager_guard, _in_eager_mode
 
 
 class MyLayer(fluid.Layer):
@@ -259,12 +259,28 @@ class TestImperative(unittest.TestCase):
             self.func_isinstance()
         self.func_isinstance()
 
-    def test_create_VarBase(self):
+    def func_create_varbase(self):
         x = np.ones([2, 2], np.float32)
         y = np.zeros([3, 3], np.float32)
         t = fluid.Tensor()
        t.set(x, fluid.CPUPlace())
-        with fluid.dygraph.guard():
+        if _in_eager_mode():
+            # TODO(jiabin): Support Kwargs and uncomment these tests
+            # egr_tmp = fluid.core.eager.EagerTensor(value=x, place=fluid.core.CPUPlace())
+            egr_tmp2 = fluid.core.eager.EagerTensor(y, fluid.core.CPUPlace())
+            egr_tmp3 = paddle.to_tensor(x)
+            egr_tmp4 = fluid.core.eager.EagerTensor(y)
+            # egr_tmp5 = fluid.core.eager.EagerTensor(value=x)
+            # TODO(jiabin): Support it when we merge LoDTensor with DenseTensor
+            egr_tmp6 = fluid.core.eager.EagerTensor(t)
+
+            # self.assertTrue(np.array_equal(x, egr_tmp.numpy()))
+            self.assertTrue(np.array_equal(y, egr_tmp2.numpy()))
+            self.assertTrue(np.array_equal(x, egr_tmp3.numpy()))
+            self.assertTrue(np.array_equal(y, egr_tmp4.numpy()))
+            # self.assertTrue(np.array_equal(x, egr_tmp5.numpy()))
+            self.assertTrue(np.array_equal(x, egr_tmp6.numpy()))
+        else:
             tmp = fluid.core.VarBase(value=x, place=fluid.core.CPUPlace())
             tmp2 = fluid.core.VarBase(y, fluid.core.CPUPlace())
             tmp3 = paddle.to_tensor(x)
@@ -279,6 +295,12 @@ class TestImperative(unittest.TestCase):
             self.assertTrue(np.array_equal(x, tmp5.numpy()))
             self.assertTrue(np.array_equal(x, tmp6.numpy()))
 
+    def test_create_varbase(self):
+        with fluid.dygraph.guard():
+            with _test_eager_guard():
+                self.func_create_varbase()
+            self.func_create_varbase()
+
     def test_no_grad_guard(self):
         data = np.array([[2, 3], [4, 5]]).astype('float32')
         with fluid.dygraph.guard():
@@ -758,7 +780,7 @@ class TestImperative(unittest.TestCase):
         self.assertTrue(np.allclose(dy_grad_h2h2, static_grad_h2h))
         self.assertTrue(np.allclose(dy_grad_i2h2, static_grad_i2h))
 
-    def test_layer_attrs(self):
+    def func_layer_attrs(self):
         layer = fluid.dygraph.Layer("test")
         layer.test_attr = 1
         self.assertFalse(hasattr(layer, "whatever"))
@@ -778,6 +800,11 @@ class TestImperative(unittest.TestCase):
         my_layer.l1 = None
         self.assertEqual(len(my_layer.sublayers()), 0)
 
+    def test_layer_attrs(self):
+        with _test_eager_guard():
+            self.func_layer_attrs()
+        self.func_layer_attrs()
+
 
 class TestDygraphUtils(unittest.TestCase):
     def func_append_activation_in_dygraph_exception(self):
-- 
GitLab