Unverified commit 1fb80a6a, authored by Jiabin Yang and committed by GitHub

Support test basic of Var and Layer (#38426)

* Rearranged Eager AutoCodeGen directory structure

* Removed USE_OP in Eager AutoCodeGen

* Enabled generation for Operators without Grad/Inputs/Outputs

* Resolved operators without input

* Fixed merge conflicts

* Enabled Eager AutoCodeGen for 10+ more operators

* Refactored Eager AutoCodeGen with more organized helper objects

* Enabled Eager AutoCodeGen for operators with multiple OpBases

* Adjusted Eager AutoCodeGen to Enable Passing Output Tensor as Input Argument

* Handled Dispensable Inputs/Outputs in Eager AutoCodeGen

* Adjusted function generation/call between Python-C API & Dygraph API

* Synchronized auto-generated Python-C API with Dygraph Forward Functions

* support more eager tensor api

* fix merge compile error

* fix compile error and fit develop code

* support pure CPU

* fix some logic error in eager_mode

* support _varbase_creator in eager mode

* Added safe_initialized interface to EagerTensor for use in processing dispensable inputs

* for eager mode

* refine

* support multiple constructor for eager tensor

* add place related code

* polish code

* specific randint with dtype of int64

* Support pure cpu test

* eager logic

* refine test in pure cpu

* eager logic

* eager logic

* eager logic, test=develop

* skip core.eager when in inference, test=develop

* refine, test=develop

* refine, test=develop

* call RetainGrad after run forward kernel, test=develop

* refine, test=develop

* support dygraph util, meta, guard test

* support inference test

* refine test and fix initializer failed

* support create varbase and fix retain grad error

* fix windows error

* support test code coverage

* support test code coverage

* support test code coverage
Co-authored-by: jim19930609 <jim19930609@gmail.com>
Co-authored-by: Wang Huan <wanghuan29@baidu.com>
Parent 2e4cb279
...@@ -1003,13 +1003,6 @@ static std::string GenerateGradNodeCreationContent(
      grad_node_creation_str +=
          paddle::string::Sprintf(ADD_EDGES_TEMPLATE, input_autograd_name,
                                  input_autograd_name, input_position);
VLOG(6) << "Generated Call RetainGradForTensor";
const char* RETAIN_GRAD_TEMPLATE =
" egr::EagerUtils::CheckAndRetainGrad(%s);\n";
grad_node_creation_str +=
paddle::string::Sprintf(RETAIN_GRAD_TEMPLATE, input_name);
    } else {
      compute_require_grad_args += ", &" + input_autograd_name;
      size_t input_position = fwd_inputs_name_pos_map.at(input_name);
...@@ -1023,6 +1016,11 @@ static std::string GenerateGradNodeCreationContent(
      grad_node_creation_str += paddle::string::Sprintf(
          ADD_EDGES_TEMPLATE, input_autograd_name, input_position);
    }
VLOG(6) << "Generated Call RetainGradForTensor";
const char* RETAIN_GRAD_TEMPLATE =
" egr::EagerUtils::CheckAndRetainGrad(%s);\n";
grad_node_creation_str +=
paddle::string::Sprintf(RETAIN_GRAD_TEMPLATE, input_name);
  }
  // [GradOpNode] SetGradInMeta
...
...@@ -42,6 +42,7 @@ namespace py = ::pybind11;
PyTypeObject* p_eager_tensor_type;
extern PyTypeObject* g_vartype_pytype;
extern PyTypeObject* g_framework_tensor_pytype;
PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
  PyObject* obj = type->tp_alloc(type, 0);
...@@ -154,6 +155,37 @@ void InitEagerTensorWithEagerTensor(EagerTensorObject* self,
  }
}
void InitEagerTensorWithFrameworkTensor(EagerTensorObject* self,
const framework::Tensor& src,
const paddle::platform::Place& place,
const std::string& name) {
self->eager_tensor.set_name(name);
if (place == src.place()) {
std::shared_ptr<pten::DenseTensor> dense_tensor =
std::make_shared<pten::DenseTensor>(
pten::make_intrusive<paddle::experimental::SharedStorage>(place),
pten::DenseTensorMeta(pten::TransToPtenDataType(src.type()),
src.dims()));
paddle::experimental::ReMakePtenDenseTensor(src, dense_tensor.get());
self->eager_tensor.set_impl(dense_tensor);
VLOG(4) << "Same place, do ShareDataWith";
} else {
std::shared_ptr<pten::DenseTensor> dense_tensor =
std::make_shared<pten::DenseTensor>(
pten::make_intrusive<paddle::experimental::SharedStorage>(
src.place()),
pten::DenseTensorMeta(pten::TransToPtenDataType(src.type()),
src.dims()));
paddle::experimental::ReMakePtenDenseTensor(src, dense_tensor.get());
auto temp = egr::EagerTensor(dense_tensor);
self->eager_tensor.set_impl(
temp.copy_to(pten::TransToPtenBackend(place), true).impl());
VLOG(4) << "Different place, do TensorCopy";
}
egr::EagerUtils::autograd_meta(&(self->eager_tensor))->SetStopGradient(true);
egr::EagerUtils::unsafe_autograd_meta(self->eager_tensor)
->SetPersistable(false);
}
// TODO(jiabin): We have to do some ugly work, refactor this method using
// PyArg_ParseTuple(),PyArg_ParseTupleAndKeywords() and PyArg_Parse() later to
// support kwargs.
...@@ -187,6 +219,11 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
 * ** tensor: EagerTensor,
 * ** place: paddle::platform::Place,
 * ** name: std::string)
* 7. (multi-place) (must have first 2 parameter)
* def __init__ (
* ** tensor: FrameworkTensor,
* ** place: paddle::platform::Place,
* ** name: std::string)
 * **/
  PADDLE_ENFORCE_NOT_NULL(
      self, paddle::platform::errors::Fatal(
...@@ -236,6 +273,15 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
        egr::Controller::Instance().GetExpectedPlace(),
        egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
    return 0;
} else if (PyObject_IsInstance(
arg0_ptr,
reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
VLOG(6) << "Calling case7's initializer.";
auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
InitEagerTensorWithFrameworkTensor(
py_tensor_ptr, src_tensor, src_tensor.place(),
egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
return 0;
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "We only support construct tensor from numpy value or tensor with "
...@@ -275,6 +321,17 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
        py_tensor_ptr, src_tensor, place,
        egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
    return 0;
} else if (PyObject_IsInstance(
arg0_ptr,
reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
VLOG(6) << "Calling case7's initializer.";
auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
paddle::platform::Place place =
CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
InitEagerTensorWithFrameworkTensor(
py_tensor_ptr, src_tensor, place,
egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
return 0;
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "We only support construct tensor from numpy value or tensor with "
...@@ -282,6 +339,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
          "please check your input first and make sure you are on the right "
          "way."));
    }
return 0;
  }
  case (Py_ssize_t)3: {
    PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
...@@ -321,6 +379,24 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
      InitEagerTensorWithEagerTensor(py_tensor_ptr, src_tensor, place,
                                     act_name);
      return 0;
} else if (PyObject_IsInstance(
arg0_ptr,
reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
VLOG(6) << "Calling case7's initializer.";
auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
paddle::platform::Place place =
CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
std::string act_name = "";
PyObject* name_obj = PyTuple_GET_ITEM(args, 2);
if (name_obj == Py_None) {
act_name = egr::Controller::Instance().GenerateUniqueName(
"generated_tensor");
} else {
act_name = CastPyArg2AttrString(name_obj, 2);
}
InitEagerTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place,
act_name);
return 0;
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "We only support construct tensor from numpy value or tensor with "
...@@ -328,6 +404,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
          "please check your input first and make sure you are on the right "
          "way."));
    }
return 0;
  }
  case (Py_ssize_t)4: {
    VLOG(6) << "Calling case3's initializer.";
...
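Taken together, the case-7 branches above let an EagerTensor be constructed directly from a framework Tensor with one, two, or three arguments. A minimal usage sketch, based on the unit test added later in this commit (names such as x and t are illustrative; run inside _test_eager_guard so the eager bindings are active):

import numpy as np
import paddle
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard

x = np.random.rand(3, 3).astype('float32')
t = paddle.fluid.Tensor()            # framework::Tensor exposed to Python
t.set(x, paddle.fluid.CPUPlace())
with _test_eager_guard():
    a = core.eager.EagerTensor(t)                                             # tensor only
    b = core.eager.EagerTensor(t, core.CPUPlace())                            # tensor + place
    c = core.eager.EagerTensor(t, core.CPUPlace(), "framework_constructed")   # tensor + place + name
    assert np.array_equal(a.numpy(), x)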
...@@ -140,13 +140,15 @@ static PyObject* eager_tensor_method_copy_(EagerTensorObject* self,
static PyObject* eager_tensor_retain_grads(EagerTensorObject* self,
                                           PyObject* args, PyObject* kwargs) {
  EAGER_TRY
  auto meta = egr::EagerUtils::autograd_meta(&(self->eager_tensor));
  if (!meta->GetMutableGradNode()) {
    VLOG(6) << "Make grad node of tensor: " << self->eager_tensor.name()
            << "become accumulation node";
    meta->SetGradNode(std::make_shared<egr::GradNodeAccumulation>());
  }
  egr::egr_utils_api::RetainGradForTensor(self->eager_tensor);
  if (egr::Controller::Instance().HasGrad()) {
    auto meta = egr::EagerUtils::autograd_meta(&(self->eager_tensor));
    if (!meta->GetMutableGradNode()) {
      VLOG(6) << "Make grad node of tensor: " << self->eager_tensor.name()
              << "become accumulation node";
      meta->SetGradNode(std::make_shared<egr::GradNodeAccumulation>());
    }
    egr::egr_utils_api::RetainGradForTensor(self->eager_tensor);
  }
  Py_INCREF(Py_None);
  return Py_None;
  EAGER_CATCH_AND_THROW_RETURN_NULL
...
...@@ -40,6 +40,7 @@ extern PyTypeObject* g_cpuplace_pytype;
extern PyTypeObject* g_xpuplace_pytype;
extern PyTypeObject* g_npuplace_pytype;
extern PyTypeObject* g_cudapinnedplace_pytype;
extern PyTypeObject* g_framework_tensor_pytype;
int TensorDtype2NumpyDtype(pten::DataType dtype) {
  switch (dtype) {
...@@ -300,6 +301,18 @@ platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) {
  return place;
}
framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos) {
if (PyObject_IsInstance(
obj, reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
return ::pybind11::handle(obj).cast<framework::Tensor>();
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be "
"EagerTensor, but got %s",
arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
}
}
paddle::framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
                                                            ssize_t arg_pos) {
  paddle::framework::proto::VarType::Type dtype;
...
...@@ -35,6 +35,7 @@ egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos);
std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
                                                            ssize_t arg_pos);
platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos);
framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos);
std::vector<int> CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos);
framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
                                                    ssize_t arg_pos);
...
...@@ -763,7 +763,6 @@ void BindImperative(py::module *m_ptr) {
        []() { imperative::SetLoadProcessSignalHandler(); });
  m.def("_throw_error_if_process_failed",
        []() { imperative::ThrowErrorIfLoadProcessFailed(); });
  // Dygraph DataLoader reader process & thread related functions
  m.def(
      "_convert_to_tensor_list",
...@@ -866,7 +865,10 @@ void BindImperative(py::module *m_ptr) {
  m.def("start_imperative_gperf_profiler",
        []() { imperative::StartProfile(); });
m.def("_set_eager_tracer",
[](const std::shared_ptr<imperative::Tracer> &tracer) {
egr::Controller::Instance().SetCurrentTracer(tracer);
});
m.def("stop_imperative_gperf_profiler", []() { imperative::StopProfile(); }); m.def("stop_imperative_gperf_profiler", []() { imperative::StopProfile(); });
m.def("_is_dygraph_debug_enabled", m.def("_is_dygraph_debug_enabled",
......
...@@ -164,6 +164,7 @@ PyTypeObject *g_cpuplace_pytype = nullptr;
PyTypeObject *g_xpuplace_pytype = nullptr;
PyTypeObject *g_npuplace_pytype = nullptr;
PyTypeObject *g_cudapinnedplace_pytype = nullptr;
PyTypeObject *g_framework_tensor_pytype = nullptr;
bool IsCompiledWithCUDA() {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
...@@ -733,7 +734,11 @@ PYBIND11_MODULE(core_noavx, m) {
  BindImperative(&m);
  py::class_<framework::Tensor>(m, "Tensor", py::buffer_protocol())
  py::class_<framework::Tensor> framework_tensor(m, "Tensor",
py::buffer_protocol());
g_framework_tensor_pytype =
reinterpret_cast<PyTypeObject *>(framework_tensor.ptr());
framework_tensor
.def("__array__", .def("__array__",
[](framework::Tensor &self) { return TensorToPyArray(self); }) [](framework::Tensor &self) { return TensorToPyArray(self); })
.def("_is_initialized", .def("_is_initialized",
......
...@@ -411,6 +411,28 @@ void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
  }
}
void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
pten::DenseTensor* dst) {
auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
meta->dims = src.dims();
// Since the type of DenseTensorMeta is const, const_cast must be used
const_cast<DataType&>(meta->dtype) = pten::TransToPtenDataType(src.type());
// Since the type of DenseTensorMeta is const, const_cast must be used
const_cast<DataLayout&>(meta->layout) =
pten::TransToPtenDataLayout(src.layout());
auto* shared_storage = static_cast<SharedStorage*>(
pten::CompatibleDenseTensorUtils::UnsafeGetMutableStorage(dst));
PADDLE_ENFORCE_NOT_NULL(
shared_storage,
platform::errors::NotFound(
"Target DenseTensor's shared storage is nullptr."));
if (src.IsInitialized()) {
shared_storage->ResetAllocation(src.Holder(), src.offset());
}
}
void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
                           const pten::TensorArgDef& arg_def,
                           pten::DenseTensor* dst) {
...
...@@ -75,6 +75,9 @@ void MovesSharedStorage(pten::DenseTensor* src,
void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
                           pten::DenseTensor* dst);
void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
pten::DenseTensor* dst);
void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
                           const pten::TensorArgDef& arg_def,
                           pten::DenseTensor* dst);
...
...@@ -268,6 +268,7 @@ if avx_supported():
    from .core_avx import _is_dygraph_debug_enabled
    from .core_avx import _dygraph_debug_level
    from .core_avx import _switch_tracer
from .core_avx import _set_eager_tracer
    from .core_avx import _disable_eager_mode
    from .core_avx import _enable_eager_mode
    from .core_avx import _in_eager_mode
...@@ -324,6 +325,7 @@ if load_noavx:
    from .core_noavx import _is_dygraph_debug_enabled
    from .core_noavx import _dygraph_debug_level
    from .core_noavx import _switch_tracer
from .core_noavx import _set_eager_tracer
    from .core_noavx import _disable_eager_mode
    from .core_noavx import _enable_eager_mode
    from .core_noavx import _in_eager_mode
...
...@@ -81,10 +81,13 @@ core._disable_eager_mode()
@signature_safe_contextmanager
def _test_eager_guard():
def _test_eager_guard(tracer=None):
    core._enable_eager_mode()
    _C_ops.switch_to_eager_ops()
    core._switch_tracer(_dygraph_tracer_)
    if tracer is None:
        core._set_eager_tracer(_dygraph_tracer_)
    else:
        core._set_eager_tracer(tracer)
    try:
        yield
    finally:
...
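With this change _test_eager_guard accepts an optional tracer; when none is given it falls back to the module-level _dygraph_tracer_. A short usage sketch, mirroring the EagerGuardTestCase added below (the explicit Tracer instance is only for illustration):

import paddle
from paddle.fluid.framework import _test_eager_guard, _in_eager_mode

tracer = paddle.fluid.dygraph.tracer.Tracer()   # explicit tracer handed to the eager controller
with _test_eager_guard(tracer):
    assert _in_eager_mode()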
...@@ -16,7 +16,7 @@ import paddle.fluid.core as core
import paddle.fluid.eager.eager_tensor_patch_methods as eager_tensor_patch_methods
import paddle
import numpy as np
from paddle.fluid.framework import _test_eager_guard, EagerParamBase
from paddle.fluid.framework import _test_eager_guard, EagerParamBase, _in_eager_mode
from paddle.fluid.data_feeder import convert_dtype
import unittest
import copy
...@@ -221,6 +221,36 @@ class EagerTensorPropertiesTestCase(unittest.TestCase):
        self.assertTrue(egr_tensor9.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor9.numpy(), arr4))
x = np.random.rand(3, 3).astype('float32')
t = paddle.fluid.Tensor()
t.set(x, paddle.fluid.CPUPlace())
egr_tensor10 = core.eager.EagerTensor(t, place)
self.assertEqual(egr_tensor10.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor10.name)
self.assertEqual(egr_tensor10.shape, [3, 3])
self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor10.stop_gradient, True)
self.assertTrue(egr_tensor10.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor10.numpy(), x))
egr_tensor11 = core.eager.EagerTensor(t, place, "framework_constructed")
self.assertEqual(egr_tensor11.persistable, False)
self.assertTrue("framework_constructed" in egr_tensor11.name)
self.assertEqual(egr_tensor11.shape, [3, 3])
self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor11.stop_gradient, True)
self.assertTrue(egr_tensor11.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor11.numpy(), x))
egr_tensor12 = core.eager.EagerTensor(t)
self.assertEqual(egr_tensor12.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor12.name)
self.assertEqual(egr_tensor12.shape, [3, 3])
self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor12.stop_gradient, True)
self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
self.assertTrue(np.array_equal(egr_tensor12.numpy(), x))
        with self.assertRaisesRegexp(
                ValueError, "The shape of Parameter should not be None"):
            eager_param = EagerParamBase(shape=None, dtype="float32")
...@@ -423,5 +453,12 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
        self.assertTrue(np.array_equal(res3, res4))
class EagerGuardTestCase(unittest.TestCase):
def test__test_eager_guard(self):
tracer = paddle.fluid.dygraph.tracer.Tracer()
with _test_eager_guard(tracer):
self.assertTrue(_in_eager_mode())
if __name__ == "__main__":
    unittest.main()
...@@ -24,7 +24,7 @@ from test_imperative_base import new_program_scope
import paddle.fluid.dygraph_utils as dygraph_utils
from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper
import paddle
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.framework import _test_eager_guard, _in_eager_mode
class MyLayer(fluid.Layer):
...@@ -259,12 +259,28 @@ class TestImperative(unittest.TestCase):
            self.func_isinstance()
        self.func_isinstance()
    def test_create_VarBase(self):
    def func_create_varbase(self):
        x = np.ones([2, 2], np.float32)
        y = np.zeros([3, 3], np.float32)
        t = fluid.Tensor()
        t.set(x, fluid.CPUPlace())
        with fluid.dygraph.guard():
        if _in_eager_mode():
# TODO(jiabin): Support Kwargs and uncomment these tests
# egr_tmp = fluid.core.eager.EagerTensor(value=x, place=fluid.core.CPUPlace())
egr_tmp2 = fluid.core.eager.EagerTensor(y, fluid.core.CPUPlace())
egr_tmp3 = paddle.to_tensor(x)
egr_tmp4 = fluid.core.eager.EagerTensor(y)
# egr_tmp5 = fluid.core.eager.EagerTensor(value=x)
# TODO(jiabin): Support it when we merge LoDTensor with DenseTensor
egr_tmp6 = fluid.core.eager.EagerTensor(t)
# self.assertTrue(np.array_equal(x, egr_tmp.numpy()))
self.assertTrue(np.array_equal(y, egr_tmp2.numpy()))
self.assertTrue(np.array_equal(x, egr_tmp3.numpy()))
self.assertTrue(np.array_equal(y, egr_tmp4.numpy()))
# self.assertTrue(np.array_equal(x, egr_tmp5.numpy()))
self.assertTrue(np.array_equal(x, egr_tmp6.numpy()))
else:
            tmp = fluid.core.VarBase(value=x, place=fluid.core.CPUPlace())
            tmp2 = fluid.core.VarBase(y, fluid.core.CPUPlace())
            tmp3 = paddle.to_tensor(x)
...@@ -279,6 +295,12 @@ class TestImperative(unittest.TestCase):
            self.assertTrue(np.array_equal(x, tmp5.numpy()))
            self.assertTrue(np.array_equal(x, tmp6.numpy()))
def test_create_varbase(self):
with fluid.dygraph.guard():
with _test_eager_guard():
self.func_create_varbase()
self.func_create_varbase()
    def test_no_grad_guard(self):
        data = np.array([[2, 3], [4, 5]]).astype('float32')
        with fluid.dygraph.guard():
...@@ -758,7 +780,7 @@ class TestImperative(unittest.TestCase):
        self.assertTrue(np.allclose(dy_grad_h2h2, static_grad_h2h))
        self.assertTrue(np.allclose(dy_grad_i2h2, static_grad_i2h))
    def test_layer_attrs(self):
    def func_layer_attrs(self):
        layer = fluid.dygraph.Layer("test")
        layer.test_attr = 1
        self.assertFalse(hasattr(layer, "whatever"))
...@@ -778,6 +800,11 @@ class TestImperative(unittest.TestCase):
        my_layer.l1 = None
        self.assertEqual(len(my_layer.sublayers()), 0)
def test_layer_attrs(self):
with _test_eager_guard():
self.func_layer_attrs()
self.func_layer_attrs()
class TestDygraphUtils(unittest.TestCase):
    def func_append_activation_in_dygraph_exception(self):
...