未验证 提交 4ba6d4e4 编写于 作者: W Weilong Wu 提交者: GitHub

Support EagerTensor initialization with kwargs (#38488)

* Support EagerTensor init with kwargs

* Updated comments

* Updated unit test cases

* Refactor InitTensor related code to reduce duplicate code

* Updated the error reporting msg

* Updated VLOG msg

* Merge develop and Update EagerTensor init func

* Polish switch case, reduce some code

* Add SyntaxError unit test case

* Refactor the related initialization func of EagerTensor

* Remove ParseStopGradient and ParseZeroCopy and ParsePersistable, construct ParseBooleanArgs instead.

* Updated error msg to pass CI

* Updated PADDLE_ENFORCE error type
上级 55cd9cb8
......@@ -192,11 +192,208 @@ void InitEagerTensorWithFrameworkTensor(EagerTensorObject* self,
egr::EagerUtils::unsafe_autograd_meta(self->eager_tensor)
->SetPersistable(false);
}
// TODO(jiabin): We have to do some ugly work, refactor this method using
// PyArg_ParseTuple(),PyArg_ParseTupleAndKeywords() and PyArg_Parse() later to
// support kwargs.
int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
/** We should have init function with signature:
// Parse the `value` argument (a numpy PyArray) from either the positional
// args tuple or the keyword-argument map.
//
// Args:
//   kws_map: keyword name -> PyObject* produced by
//            PyArg_ParseTupleAndKeywords (entry is NULL when the keyword
//            was not supplied).
//   kw_order_map: keyword name -> 1-based position of that argument in the
//                 constructor signature (must contain "value").
//   args: the positional-argument tuple.
//   flag_kwargs: true when any keyword arguments were supplied.
//   args_num: number of positional arguments in `args`.
//
// Returns a py::object holding a new reference to the PyArray.
// Throws InvalidArgument when `value` was supplied neither positionally nor
// as a keyword.
py::object ParsePyArray(
    const std::unordered_map<std::string, PyObject*>& kws_map,
    const std::unordered_map<std::string, Py_ssize_t>& kw_order_map,
    PyObject* args, bool flag_kwargs, Py_ssize_t args_num) {
  py::object numpy_value = py::object();

  if (kw_order_map.at("value") <= args_num) {
    // `value` was passed positionally; `true` increments the refcount.
    numpy_value = py::object(
        py::handle(PyTuple_GET_ITEM(args, kw_order_map.at("value") - 1)),
        true);
  } else {
    auto iter = kws_map.find("value");
    if (flag_kwargs && iter != kws_map.end() && iter->second != NULL) {
      // `value` was passed as a keyword argument.
      numpy_value = py::object(py::handle(iter->second), true);
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The first expected arguments is {value: PyArray}, "
          "but could not parse the first argument {value: PyArray} "
          "successfully. "
          "Please check your input first and make sure you are on the right "
          "way."));
    }
  }
  return numpy_value;
}
// Parse the `place` argument from either the positional args tuple or the
// keyword-argument map.
//
// Args mirror ParsePyArray; `kw_order_map` must contain "place".
//
// Returns the parsed place, or the controller's current expected place when
// `place` was not supplied at all.
paddle::platform::Place ParsePlace(
    const std::unordered_map<std::string, PyObject*>& kws_map,
    const std::unordered_map<std::string, Py_ssize_t>& kw_order_map,
    PyObject* args, bool flag_kwargs, Py_ssize_t args_num) {
  // Default: the globally expected place.
  paddle::platform::Place place =
      egr::Controller::Instance().GetExpectedPlace();
  if (kw_order_map.at("place") <= args_num) {
    // `place` was passed positionally.
    place =
        CastPyArg2Place(PyTuple_GET_ITEM(args, kw_order_map.at("place") - 1),
                        kw_order_map.at("place") - 1);
  } else {
    auto iter = kws_map.find("place");
    if (flag_kwargs && iter != kws_map.end() && iter->second != NULL) {
      // `place` was passed as a keyword argument.
      place = CastPyArg2Place(iter->second, 0);
    }
    // Otherwise keep the default expected place.
  }
  return place;
}
// boolean arguments: zero_copy, stop_gradient, persistable
bool ParseBooleanArgs(std::string key,
std::unordered_map<std::string, PyObject*> kws_map,
std::unordered_map<std::string, Py_ssize_t> kw_order_map,
PyObject* args, bool flag_kwargs, Py_ssize_t args_num) {
bool res = false;
if (key == "stop_gradient") res = true;
if (kw_order_map[key] <= args_num) {
res = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, kw_order_map[key] - 1),
kw_order_map[key] - 1);
} else {
if (flag_kwargs && kws_map[key] != NULL) {
res = CastPyArg2AttrBoolean(kws_map[key], 0);
} else {
return res;
}
}
return res;
}
std::string ParseName(std::unordered_map<std::string, PyObject*> kws_map,
std::unordered_map<std::string, Py_ssize_t> kw_order_map,
PyObject* args, bool flag_kwargs, Py_ssize_t args_num) {
std::string act_name = "";
if (kw_order_map["name"] <= args_num) {
PyObject* name_obj = PyTuple_GET_ITEM(args, kw_order_map["name"] - 1);
if (name_obj == Py_None) {
act_name =
egr::Controller::Instance().GenerateUniqueName("generated_tensor");
} else {
act_name = CastPyArg2AttrString(name_obj, kw_order_map["name"] - 1);
}
} else {
if (flag_kwargs) {
if (kws_map["name"] == NULL) {
act_name =
egr::Controller::Instance().GenerateUniqueName("generated_tensor");
} else {
act_name = CastPyArg2AttrString(kws_map["name"], 0);
}
} else {
act_name =
egr::Controller::Instance().GenerateUniqueName("generated_tensor");
}
}
return act_name;
}
// initialize EagerTensor by PyArray(first argument is PyArray,
// mix args and kwargs) automatically.
void AutoInitEagerTensorByPyArray(
EagerTensorObject* py_tensor_ptr,
std::unordered_map<std::string, PyObject*> kws_map, PyObject* args,
bool flag_kwargs, Py_ssize_t args_num) {
// The first argument of the EagerTensor constructor is PyArray,
// there are 6 arguments to construct the new EagerTensor,
// kw_order_map's key is every arguments of the constructor,
// kw_order_map's value is the position of the arguments respectively.
// If u want to update this constructor with new arguments,
// need to update this map and to add or change related code.
std::unordered_map<std::string, Py_ssize_t> kw_order_map{
{"value", 1}, {"place", 2}, {"persistable", 3},
{"zero_copy", 4}, {"name", 5}, {"stop_gradient", 6}};
py::object numpy_value = py::object();
paddle::platform::Place place =
egr::Controller::Instance().GetExpectedPlace();
bool persistable = false;
bool zero_copy = false;
std::string act_name = "";
bool stop_gradient = true;
numpy_value =
ParsePyArray(kws_map, kw_order_map, args, flag_kwargs, args_num);
place = ParsePlace(kws_map, kw_order_map, args, flag_kwargs, args_num);
persistable = ParseBooleanArgs("persistable", kws_map, kw_order_map, args,
flag_kwargs, args_num);
zero_copy = ParseBooleanArgs("zero_copy", kws_map, kw_order_map, args,
flag_kwargs, args_num);
act_name = ParseName(kws_map, kw_order_map, args, flag_kwargs, args_num);
stop_gradient = ParseBooleanArgs("stop_gradient", kws_map, kw_order_map, args,
flag_kwargs, args_num);
EmptyEagerTensorInitializer(py_tensor_ptr, act_name, place, persistable,
stop_gradient);
InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy);
}
// initialize EagerTensor by EagerTensor or framework::Tensor (mix args and
// kwargs) automatically.
void AutoInitEagerTensorByTensor(
EagerTensorObject* py_tensor_ptr,
std::unordered_map<std::string, PyObject*> kws_map, PyObject* args,
bool flag_kwargs, Py_ssize_t args_num, bool init_by_egr_tensor = true) {
// The first argument of the EagerTensor constructor is EagerTensor or
// framework Tensor,
// there are 3 arguments to construct the new EagerTensor,
// kw_order_map's key is every arguments of the constructor,
// kw_order_map's value is the position of the arguments respectively.
// If u want to update this constructor with new arguments,
// need to update this map and to add or change related code.
std::unordered_map<std::string, Py_ssize_t> kw_order_map{
{"value", 1}, {"place", 2}, {"name", 3}};
paddle::platform::Place place =
egr::Controller::Instance().GetExpectedPlace();
std::string act_name = "";
place = ParsePlace(kws_map, kw_order_map, args, flag_kwargs, args_num);
act_name = ParseName(kws_map, kw_order_map, args, flag_kwargs, args_num);
if (init_by_egr_tensor) {
egr::EagerTensor src_tensor;
if (kw_order_map["value"] <= args_num) {
src_tensor = CastPyArg2EagerTensor(
PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
kw_order_map["value"] - 1);
} else {
if (flag_kwargs && kws_map["value"] != NULL) {
src_tensor = CastPyArg2EagerTensor(kws_map["value"], 0);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The first expected kwargs is {value: EagerTensor}, "
"but could not parse the first argument {value: EagerTensor} "
"successfully. "
"Please check your input first and make sure you are on the right "
"way."));
}
}
InitEagerTensorWithEagerTensor(py_tensor_ptr, src_tensor, place, act_name);
} else {
// init by framework tensor
framework::Tensor src_tensor;
if (kw_order_map["value"] <= args_num) {
src_tensor = CastPyArg2FrameworkTensor(
PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
kw_order_map["value"] - 1);
} else {
if (flag_kwargs && kws_map["value"] != NULL) {
src_tensor = CastPyArg2FrameworkTensor(kws_map["value"], 0);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The first expected arguments is {value: framework::Tensor}, "
"but could not parse the first argument {value: framework::Tensor} "
"successfully. "
"Please check your input first and make sure you are on the right "
"way."));
}
}
InitEagerTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place,
act_name);
}
}
/** We should have init function with signature:
* 1.
* def __init__ ()
* 2.
......@@ -204,9 +401,11 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
* ** dtype: paddle::framework::proto::VarType::Type,
* ** dims: vector<int>,
* ** name: std::string,
* ** type: paddle::framework::proto::VarType::Type,
* ** type: paddle::framework::proto::VarType::LodTensor,
* ** persistable: bool)
* 3. (multi-place) (must have first 2 parameter)
* 3. (multi-place)
* (should have at least one parameter, one parameter equals to case 4, zero
* parameter equals to case 1)
* def __init__ (
* ** value: ndarray,
* ** place: paddle::platform::Place,
......@@ -220,17 +419,77 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
* 5.
* def __init__ (
* ** tensor: EagerTensor)
* 6. (multi-place) (must have first 2 parameter)
* 6. (multi-place)
* (should have at least one parameter, one parameter equals to case 5, zero
* parameter equals to case 1.)
* def __init__ (
* ** tensor: EagerTensor,
* ** place: paddle::platform::Place,
* ** name: std::string)
* 7. (multi-place) (must have first 2 parameter)
* 7. (multi-place) (should have at least one parameter, one parameter similar
* to case 5, zero parameter equals to case 1.)
* def __init__ (
* ** tensor: FrameworkTensor,
* ** place: paddle::platform::Place,
* ** name: std::string)
* **/
int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
// set a flag to record use kwargs or not
bool flag_kwargs = false;
if (kwargs) flag_kwargs = true;
// all kwargs
PyObject* kw_zero_copy = NULL;
PyObject* kw_persistable = NULL;
PyObject* kw_stop_gradient = NULL;
PyObject* kw_value = NULL; // receive PyArray or EagerTensor
PyObject* kw_place = NULL;
PyObject* kw_name = NULL;
PyObject* kw_dims = NULL;
PyObject* kw_dtype = NULL;
PyObject* kw_type = NULL;
// the keywords argument
static char* kwlist[] = {
const_cast<char*>("value"), const_cast<char*>("place"),
const_cast<char*>("persistable"), const_cast<char*>("zero_copy"),
const_cast<char*>("name"), const_cast<char*>("stop_gradient"),
const_cast<char*>("dims"), const_cast<char*>("dtype"),
const_cast<char*>("type"), NULL};
// 'O' Store a Python object (without any conversion) in a C object pointer,
// '|' Indicates that the remaining arguments in the Python argument list are
// optional.
// PyArg_ParseTupleAndKeywords can Parse the parameters of a function that
// takes both positional and keyword parameters into local variables,
// which enhance case2, case3, case4, case5, case6, case7.
bool flag_ = PyArg_ParseTupleAndKeywords(
args, kwargs, "|OOOOOOOOO", kwlist, &kw_value, &kw_place, &kw_persistable,
&kw_zero_copy, &kw_name, &kw_stop_gradient, &kw_dims, &kw_dtype,
&kw_type);
// helper map
std::unordered_map<std::string, PyObject*> kws_map{
{"value", kw_value},
{"place", kw_place},
{"persistable", kw_persistable},
{"zero_copy", kw_zero_copy},
{"name", kw_name},
{"stop_gradient", kw_stop_gradient},
{"dims", kw_dims},
{"dtype", kw_dtype},
{"type", kw_type}};
PADDLE_ENFORCE_EQ(flag_, true,
paddle::platform::errors::PreconditionNotMet(
"Could not parse args and kwargs successfully, "
"please check your input first and make"
"sure you are on the right way. "
"The expected arguments as follow: ("
"value, place, persistable, zero_copy, "
"name, stop_gradient, dims, dtype, type)"));
PADDLE_ENFORCE_NOT_NULL(
self, paddle::platform::errors::Fatal(
"Calling __init__ of Eager Tensor without __new__ is "
......@@ -239,10 +498,12 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
auto py_tensor_ptr = reinterpret_cast<EagerTensorObject*>(self);
// TODO(jiabin): Only support case 2 for now
Py_ssize_t args_num = PyTuple_Size(args);
switch (args_num) {
case (Py_ssize_t)0: {
VLOG(6) << " args_num: " << args_num;
// args_num = 0, means that there is no position arguments.
if (args_num == (Py_ssize_t)0) {
if (!flag_kwargs) {
// case 1
VLOG(6) << "Calling case1's initializer.";
EmptyEagerTensorInitializer(
......@@ -250,194 +511,150 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
egr::Controller::Instance().GetExpectedPlace());
return 0;
}
case (Py_ssize_t)1: {
// case 4, 5
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case4's initializer.";
PADDLE_ENFORCE_EQ(
pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
paddle::platform::errors::Fatal(
"We expected initial parametes list like: \n **value: ndarray. "
"But got value with wrong type: %s",
reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
py::object numpy_value = py::object(py::handle(arg0_ptr), true);
EmptyEagerTensorInitializer(
py_tensor_ptr,
egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
egr::Controller::Instance().GetExpectedPlace());
InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value,
/** zero copy **/ false);
} else { // no position args, all arguments are kwargs
if (kw_value != NULL) {
if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) {
VLOG(6) << "Calling case3's or case4's initializer";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args,
flag_kwargs, args_num);
return 0;
} else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast<PyObject*>(
} else if (PyObject_IsInstance(kw_value, reinterpret_cast<PyObject*>(
p_eager_tensor_type))) {
VLOG(6) << "Calling case5's initializer.";
auto src_tensor = CastPyArg2EagerTensor(arg0_ptr, 0);
InitEagerTensorWithEagerTensor(
py_tensor_ptr, src_tensor,
egr::Controller::Instance().GetExpectedPlace(),
egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
VLOG(6) << "Calling case5's or case6's initializer";
AutoInitEagerTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else if (PyObject_IsInstance(
arg0_ptr,
reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
} else if (PyObject_IsInstance(kw_value,
reinterpret_cast<PyObject*>(
g_framework_tensor_pytype))) {
VLOG(6) << "Calling case7's initializer.";
auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
InitEagerTensorWithFrameworkTensor(
py_tensor_ptr, src_tensor, src_tensor.place(),
egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
AutoInitEagerTensorByTensor(
py_tensor_ptr, kws_map, args, flag_kwargs, args_num,
/* false means not init by egr tensor*/ false);
return 0;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"We only support construct tensor from numpy value or tensor with "
"python args by this initializer, "
"please check your input first and make sure you are on the right "
"way."));
"Could not parse the first keyword argument successfully, "
"the first keyword argument is value, but it should be PyArray "
"or EagerTensor or framework::Tensor. "
"Please check your input first and make sure you are on the "
"right way."));
}
return 0;
} else if (kw_dtype != NULL &&
PyObject_IsInstance(
kw_dtype, reinterpret_cast<PyObject*>(g_vartype_pytype))) {
VLOG(6) << "Calling case2's initializer";
PADDLE_ENFORCE_NOT_NULL(
kw_dims,
paddle::platform::errors::InvalidArgument(
"Calling __init__ of Eager Tensor with NULL dims is "
"forbidden. Please check your code and make sure you new a "
"dims before calling this constructor."));
PADDLE_ENFORCE_NOT_NULL(
kw_name,
paddle::platform::errors::InvalidArgument(
"Calling __init__ of Eager Tensor with NULL name is "
"forbidden. Please check your code and make sure you new a "
"name before calling this constructor."));
PADDLE_ENFORCE_NOT_NULL(
kw_dtype,
paddle::platform::errors::InvalidArgument(
"Calling __init__ of Eager Tensor with NULL dtype is "
"forbidden. Please check your code and make sure you new a "
"dtype before calling this constructor."));
PADDLE_ENFORCE_NOT_NULL(
kw_persistable,
paddle::platform::errors::InvalidArgument(
"Calling __init__ of Eager Tensor with NULL persistable is "
"forbidden. Please check your code and make sure you new a "
"persistable before calling this constructor."));
paddle::framework::proto::VarType::Type dtype =
CastPyArg2ProtoType(kw_dtype, 0);
std::vector<int> dims = CastPyArg2VectorOfInt(kw_dims, 0);
std::string act_name = "";
if (kw_name == Py_None) {
act_name = egr::Controller::Instance().GenerateUniqueName(
"generated_tensor");
} else {
act_name = CastPyArg2AttrString(kw_name, 0);
}
case (Py_ssize_t)2: {
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's initializer.";
PADDLE_ENFORCE_EQ(
pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
paddle::platform::errors::Fatal(
"We expected initial parametes list like: \n **value: ndarray. "
"But got value with wrong type: %s",
reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
py::object numpy_value = py::object(py::handle(arg0_ptr), true);
paddle::platform::Place place =
CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
paddle::framework::proto::VarType::Type var_type =
CastPyArg2ProtoType(kw_type, 0);
bool persistable = CastPyArg2AttrBoolean(kw_persistable, 0);
EmptyEagerTensorInitializer(
py_tensor_ptr,
egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
place);
InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value,
/** zero copy **/ false);
return 0;
} else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast<PyObject*>(
p_eager_tensor_type))) {
VLOG(6) << "Calling case6's initializer.";
auto src_tensor = CastPyArg2EagerTensor(arg0_ptr, 0);
paddle::platform::Place place =
CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
InitEagerTensorWithEagerTensor(
py_tensor_ptr, src_tensor, place,
egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
return 0;
} else if (PyObject_IsInstance(
arg0_ptr,
reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
VLOG(6) << "Calling case7's initializer.";
auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
paddle::platform::Place place =
CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
InitEagerTensorWithFrameworkTensor(
py_tensor_ptr, src_tensor, place,
egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
py_tensor_ptr, act_name,
egr::Controller::Instance().GetExpectedPlace(), persistable,
/* stop_gradient */ true, dtype, dims, var_type);
return 0;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"We only support construct tensor from numpy value or tensor with "
"python args by this initializer, "
"please check your input first and make sure you are on the right "
"way."));
"We not only support construct EagerTensor from numpy value "
"or tensor(EagerTensor or framework::Tensor) "
"with python kwargs by this initializer, "
"but also even support dtype to init a empty EagerTensor. "
"Please check your input first and make sure you call the existed "
"constructor."));
}
return 0;
}
case (Py_ssize_t)3: {
} else if (args_num == (Py_ssize_t)1 || args_num == (Py_ssize_t)2 ||
args_num == (Py_ssize_t)3) {
// 1 to 3 position args, remainting arguments are kwargs
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's initializer.";
PADDLE_ENFORCE_EQ(
pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
paddle::platform::errors::Fatal(
"We expected initial parametes list like: \n **value: ndarray. "
"But got value with wrong type: %s",
reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
py::object numpy_value = py::object(py::handle(arg0_ptr), true);
paddle::platform::Place place =
CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
EmptyEagerTensorInitializer(
py_tensor_ptr,
egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
place, persistable);
InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value,
/** zero copy **/ false);
VLOG(6) << "Calling case3's or case4's initializer.";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast<PyObject*>(
p_eager_tensor_type))) {
VLOG(6) << "Calling case6's initializer.";
auto src_tensor = CastPyArg2EagerTensor(arg0_ptr, 0);
paddle::platform::Place place =
CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
std::string act_name = "";
PyObject* name_obj = PyTuple_GET_ITEM(args, 2);
if (name_obj == Py_None) {
act_name = egr::Controller::Instance().GenerateUniqueName(
"generated_tensor");
} else {
act_name = CastPyArg2AttrString(name_obj, 2);
}
InitEagerTensorWithEagerTensor(py_tensor_ptr, src_tensor, place,
act_name);
VLOG(6) << "Calling case5's or case6's initializer.";
AutoInitEagerTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else if (PyObject_IsInstance(
arg0_ptr,
reinterpret_cast<PyObject*>(g_framework_tensor_pytype))) {
} else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast<PyObject*>(
g_framework_tensor_pytype))) {
VLOG(6) << "Calling case7's initializer.";
auto src_tensor = CastPyArg2FrameworkTensor(arg0_ptr, 0);
paddle::platform::Place place =
CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
std::string act_name = "";
PyObject* name_obj = PyTuple_GET_ITEM(args, 2);
if (name_obj == Py_None) {
act_name = egr::Controller::Instance().GenerateUniqueName(
"generated_tensor");
} else {
act_name = CastPyArg2AttrString(name_obj, 2);
}
InitEagerTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place,
act_name);
AutoInitEagerTensorByTensor(
py_tensor_ptr, kws_map, args, flag_kwargs, args_num,
/* false means not init by egr tensor*/ false);
return 0;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"We only support construct tensor from numpy value or tensor with "
"python args by this initializer, "
"please check your input first and make sure you are on the right "
"way."));
"We support construct EagerTensor from numpy value "
"or tensor(EagerTensor or framework::Tensor) "
"with python args and kwargs by this initializer, "
"but the first argument should be PyArray or EagerTensor or "
"framework::Tensor. "
"Please check your input first and make sure you call the existed "
"constructor."));
}
return 0;
}
case (Py_ssize_t)4: {
VLOG(6) << "Calling case3's initializer.";
} else if (args_num == (Py_ssize_t)4) {
// 4 position args, remainting arguments are kwargs
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
PADDLE_ENFORCE_EQ(
pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
paddle::platform::errors::Fatal(
"We expected initial parametes list like: \n **value: ndarray, "
"\n ** place: paddle::platform::Place, \n ** persistable: bool, "
"\n ** zero_copy: bool, \n ** name: std::string, \n ** "
"stop_gradient: bool. But got value with wrong type: %s",
reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
py::object numpy_value =
py::object(py::handle(PyTuple_GET_ITEM(args, 0)), true);
paddle::platform::Place place =
CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
bool zero_copy = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
EmptyEagerTensorInitializer(
py_tensor_ptr,
egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
place, persistable);
InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's initializer.";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Incompatible constructor arguments, "
"there are 4 position args and remainting arguments arg kwargs,"
"but the first position args should be PyArray. "
"Please check your code and make sure the first position args is "
"PyArray."));
}
case (Py_ssize_t)5: {
// case 2
} else if (args_num == (Py_ssize_t)5) {
if (!flag_kwargs) {
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (PyObject_IsInstance(arg0_ptr,
reinterpret_cast<PyObject*>(g_vartype_pytype))) {
......@@ -445,7 +662,6 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
paddle::framework::proto::VarType::Type dtype =
CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 0), 0);
std::vector<int> dims =
CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 1), 1);
std::string act_name = "";
PyObject* name_obj = PyTuple_GET_ITEM(args, 2);
......@@ -463,82 +679,57 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
egr::Controller::Instance().GetExpectedPlace(), persistable, true,
dtype, dims, var_type);
return 0;
} else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast<PyObject*>(
p_eager_tensor_type))) {
PADDLE_ENFORCE_EQ(
pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
paddle::platform::errors::Fatal(
"We expected initial parametes list like: \n **value: ndarray, "
"\n ** place: paddle::platform::Place, \n ** persistable: "
"bool, \n ** zero_copy: bool, \n ** name: std::string, \n ** "
"stop_gradient: bool. But got value with wrong type: %s",
reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
py::object numpy_value =
py::object(py::handle(PyTuple_GET_ITEM(args, 0)), true);
paddle::platform::Place place =
CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
bool zero_copy = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
std::string act_name = "";
PyObject* name_obj = PyTuple_GET_ITEM(args, 4);
if (name_obj == Py_None) {
act_name = egr::Controller::Instance().GenerateUniqueName(
"generated_tensor");
} else if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's initializer.";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else {
act_name = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 4), 4);
PADDLE_THROW(platform::errors::InvalidArgument(
"Incompatible constructor arguments, "
"there are only 5 position args,"
"but the first position args should be PyArray or dtype. "
"Please check your code and make sure you call the existed "
"constructor."));
}
EmptyEagerTensorInitializer(py_tensor_ptr, act_name, place,
persistable);
InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy);
} else { // five position args, remainting arguments are kwargs
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's initializer";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"We only support construct tensor from numpy value or dtype with "
"python args by this initializer, "
"please check your input first and make sure you are on the right "
"way."));
"Incompatible constructor arguments, "
"there are 5 position args and remainting arguments are kwargs,"
"but the first position args should be PyArray. "
"Please check your code and make sure the first position args is "
"PyArray."));
}
return 0;
}
case (Py_ssize_t)6: {
} else if (args_num == (Py_ssize_t)6) {
if (!flag_kwargs) {
// case 3
VLOG(6) << "Calling case3's initializer.";
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
PADDLE_ENFORCE_EQ(
pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
paddle::platform::errors::Fatal(
"We expected initial parametes list like: \n **value: ndarray, "
"\n ** place: paddle::platform::Place, \n ** persistable: bool, "
"\n ** zero_copy: bool, \n ** name: std::string, \n ** "
"stop_gradient: bool. But got value with wrong type: %s",
reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
py::object numpy_value =
py::object(py::handle(PyTuple_GET_ITEM(args, 0)), true);
paddle::platform::Place place =
CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
bool zero_copy = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
std::string act_name = "";
PyObject* name_obj = PyTuple_GET_ITEM(args, 4);
if (name_obj == Py_None) {
act_name =
egr::Controller::Instance().GenerateUniqueName("generated_tensor");
} else {
act_name = CastPyArg2AttrString(name_obj, 4);
}
bool stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 5), 5);
EmptyEagerTensorInitializer(py_tensor_ptr, act_name, place, persistable,
stop_gradient);
InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy);
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else { // six position args, remainting arguments are kwargs, but this
// is not a right way
PADDLE_THROW(platform::errors::InvalidArgument(
"Incompatible constructor arguments, "
"there are 6 position args and the remainting arguments are kwargs. "
"Please check your code and make sure the first position args is "
"PyArray."));
}
default: {
} else {
PADDLE_THROW(platform::errors::Fatal(
"Can't not find expected num of args, please check your call, and "
"make sure u call the existed constructor."));
return 1;
}
}
return 1;
}
static void eagertensor_dealloc(EagerTensorObject* self) {
......@@ -607,7 +798,7 @@ void BindEager(pybind11::module* module) {
p_eager_tensor_type = &eager_tensor_type;
if (PyType_Ready(&eager_tensor_type) < 0) {
PADDLE_THROW(platform::errors::Fatal(
"Init Paddle erroe in BindEager(PyType_Ready)."));
"Init Paddle error in BindEager(PyType_Ready)."));
return;
}
......@@ -617,7 +808,7 @@ void BindEager(pybind11::module* module) {
Py_DECREF(&eager_tensor_type);
Py_DECREF(m.ptr());
PADDLE_THROW(platform::errors::Fatal(
"Init Paddle erroe in BindEager(PyModule_AddObject)."));
"Init Paddle error in BindEager(PyModule_AddObject)."));
return;
}
......
......@@ -20,6 +20,7 @@ from paddle.fluid.framework import _test_eager_guard, EagerParamBase, _in_eager_
from paddle.fluid.data_feeder import convert_dtype
import unittest
import copy
import paddle.compat as cpt
class EagerScaleTestCase(unittest.TestCase):
......@@ -289,6 +290,316 @@ class EagerTensorPropertiesTestCase(unittest.TestCase):
for p in place_list:
self.constructor(p)
def constructor_with_kwargs(self, place):
    """Exercise every supported keyword/positional combination of the
    core.eager.EagerTensor constructor for a given `place`.

    Covers four construction paths:
      1. from a NumPy array (value=...), with place/persistable/name/
         zero_copy/stop_gradient passed positionally and/or as kwargs;
      2. the "empty tensor" path driven by dtype/dims/name/type/persistable;
      3. from another EagerTensor (copies shape/dtype/data);
      4. from a framework (LoD) Tensor.

    Args:
        place: a paddle Place (e.g. core.CPUPlace() / core.CUDAPlace(0))
            used as the target device for most cases.
    """
    # init EagerTensor by Python array
    arr = np.random.rand(4, 16, 16, 32).astype('float32')

    # Case: only `value` given — place falls back to the current expected
    # place, name is auto-generated, defaults: persistable=False,
    # stop_gradient=True.
    egr_tensor0 = core.eager.EagerTensor(value=arr)
    self.assertEqual(egr_tensor0.persistable, False)
    self.assertTrue("generated" in egr_tensor0.name)
    self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
    self.assertTrue(
        egr_tensor0.place._equals(
            paddle.fluid.framework._current_expected_place()))
    self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor0.stop_gradient, True)

    # Case: value + place, both as kwargs.
    egr_tensor1 = core.eager.EagerTensor(value=arr, place=place)
    self.assertEqual(egr_tensor1.persistable, False)
    self.assertTrue("generated" in egr_tensor1.name)
    self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor1.place._equals(place))
    self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor1.stop_gradient, True)

    # Case: value positional, place as kwarg.
    egr_tensor2 = core.eager.EagerTensor(arr, place=place)
    self.assertEqual(egr_tensor2.persistable, False)
    self.assertTrue("generated" in egr_tensor2.name)
    self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor2.place._equals(place))
    self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor2.stop_gradient, True)

    # Case: explicit name via kwarg.
    egr_tensor3 = core.eager.EagerTensor(
        arr, place=place, name="new_eager_tensor")
    self.assertEqual(egr_tensor3.persistable, False)
    self.assertTrue("new_eager_tensor" in egr_tensor3.name)
    self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor3.place._equals(place))
    self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor3.stop_gradient, True)

    # Case: persistable=True via kwarg (egr_tensor4 is reused below as the
    # source for the EagerTensor-from-EagerTensor cases).
    egr_tensor4 = core.eager.EagerTensor(
        arr, place=place, persistable=True, name="new_eager_tensor")
    self.assertEqual(egr_tensor4.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor4.name)
    self.assertEqual(egr_tensor4.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor4.place._equals(place))
    self.assertEqual(egr_tensor4.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor4.stop_gradient, True)

    # Case: place passed positionally (CPUPlace), rest as kwargs,
    # zero_copy=True requested explicitly.
    egr_tensor5 = core.eager.EagerTensor(
        arr,
        core.CPUPlace(),
        persistable=True,
        name="new_eager_tensor",
        zero_copy=True)
    self.assertEqual(egr_tensor5.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor5.name)
    self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor5.place.is_cpu_place())
    self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor5.stop_gradient, True)

    # Case: same as above but place given as a kwarg.
    egr_tensor6 = core.eager.EagerTensor(
        arr,
        place=core.CPUPlace(),
        persistable=True,
        name="new_eager_tensor",
        zero_copy=True)
    self.assertEqual(egr_tensor6.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor6.name)
    self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor6.place.is_cpu_place())
    self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor6.stop_gradient, True)

    # Case: zero_copy on the caller-supplied place.
    egr_tensor7 = core.eager.EagerTensor(
        arr,
        place=place,
        persistable=True,
        name="new_eager_tensor",
        zero_copy=True)
    self.assertEqual(egr_tensor7.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor7.name)
    self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor7.place._equals(place))
    self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor7.stop_gradient, True)

    # Case: all optional args as kwargs, stop_gradient overridden to False.
    egr_tensor8 = core.eager.EagerTensor(
        arr,
        place=place,
        persistable=True,
        name="new_eager_tensor",
        zero_copy=True,
        stop_gradient=False)
    self.assertEqual(egr_tensor8.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor8.name)
    self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor8.place._equals(place))
    self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor8.stop_gradient, False)

    # Case: five positional args (value, place, persistable, zero_copy,
    # name) plus one trailing kwarg.
    egr_tensor9 = core.eager.EagerTensor(
        arr, place, True, True, "new_eager_tensor", stop_gradient=False)
    self.assertEqual(egr_tensor9.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor9.name)
    self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor9.place._equals(place))
    self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor9.stop_gradient, False)

    # Case: four positional args, name and stop_gradient as kwargs.
    egr_tensor10 = core.eager.EagerTensor(
        arr,
        place,
        True,
        True,
        name="new_eager_tensor",
        stop_gradient=False)
    self.assertEqual(egr_tensor10.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor10.name)
    self.assertEqual(egr_tensor10.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor10.place._equals(place))
    self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor10.stop_gradient, False)

    # Case: three positional args, remainder as kwargs.
    egr_tensor11 = core.eager.EagerTensor(
        arr,
        place,
        True,
        zero_copy=True,
        name="new_eager_tensor",
        stop_gradient=False)
    self.assertEqual(egr_tensor11.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor11.name)
    self.assertEqual(egr_tensor11.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor11.place._equals(place))
    self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor11.stop_gradient, False)

    # Case: two positional args, remainder as kwargs.
    egr_tensor12 = core.eager.EagerTensor(
        arr,
        place,
        persistable=True,
        zero_copy=True,
        name="new_eager_tensor",
        stop_gradient=False)
    self.assertEqual(egr_tensor12.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor12.name)
    self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor12.place._equals(place))
    self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor12.stop_gradient, False)

    # Case: every argument as a kwarg.
    egr_tensor13 = core.eager.EagerTensor(
        value=arr,
        place=place,
        persistable=True,
        zero_copy=True,
        name="new_eager_tensor",
        stop_gradient=False)
    self.assertEqual(egr_tensor13.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor13.name)
    self.assertEqual(egr_tensor13.shape, [4, 16, 16, 32])
    self.assertTrue(egr_tensor13.place._equals(place))
    self.assertEqual(egr_tensor13.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor13.stop_gradient, False)

    # special case
    # Empty-tensor construction path: no `value`, shape/dtype/type given
    # explicitly instead.
    egr_tensor14 = core.eager.EagerTensor(
        dtype=core.VarDesc.VarType.FP32,
        dims=[4, 16, 16, 32],
        name="special_eager_tensor",
        type=core.VarDesc.VarType.LOD_TENSOR,
        persistable=True)
    self.assertEqual(egr_tensor14.persistable, True)
    self.assertEqual(egr_tensor14.name, "special_eager_tensor")
    self.assertEqual(egr_tensor14.shape, [4, 16, 16, 32])
    self.assertEqual(egr_tensor14.dtype, core.VarDesc.VarType.FP32)

    # init EagerTensor by EagerTensor
    # Copies data/shape/dtype/persistable from egr_tensor4; with no name
    # given a fresh "generated" name is assigned, and stop_gradient resets
    # to the default True.
    egr_tensor15 = core.eager.EagerTensor(value=egr_tensor4)
    self.assertEqual(egr_tensor15.persistable, True)
    self.assertTrue("generated" in egr_tensor15.name)
    self.assertEqual(egr_tensor15.shape, egr_tensor4.shape)
    self.assertEqual(egr_tensor15.dtype, egr_tensor4.dtype)
    self.assertEqual(egr_tensor15.stop_gradient, True)
    self.assertTrue(
        egr_tensor15.place._equals(
            paddle.fluid.framework._current_expected_place()))
    self.assertTrue(
        np.array_equal(egr_tensor15.numpy(), egr_tensor4.numpy()))

    # Case: EagerTensor source with an explicit name.
    egr_tensor16 = core.eager.EagerTensor(
        value=egr_tensor4, name="new_eager_tensor")
    self.assertEqual(egr_tensor16.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor16.name)
    self.assertEqual(egr_tensor16.shape, egr_tensor4.shape)
    self.assertEqual(egr_tensor16.dtype, egr_tensor4.dtype)
    self.assertEqual(egr_tensor16.stop_gradient, True)
    self.assertTrue(
        egr_tensor16.place._equals(
            paddle.fluid.framework._current_expected_place()))
    self.assertTrue(
        np.array_equal(egr_tensor16.numpy(), egr_tensor4.numpy()))

    # Case: EagerTensor source, target place and name as kwargs.
    egr_tensor17 = core.eager.EagerTensor(
        value=egr_tensor4,
        place=place,
        name="new_eager_tensor", )
    self.assertEqual(egr_tensor17.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor17.name)
    self.assertEqual(egr_tensor17.shape, egr_tensor4.shape)
    self.assertEqual(egr_tensor17.dtype, egr_tensor4.dtype)
    self.assertEqual(egr_tensor17.stop_gradient, True)
    self.assertTrue(egr_tensor17.place._equals(place))
    self.assertTrue(
        np.array_equal(egr_tensor17.numpy(), egr_tensor4.numpy()))

    # Case: EagerTensor source passed positionally.
    egr_tensor18 = core.eager.EagerTensor(
        egr_tensor4,
        place=place,
        name="new_eager_tensor", )
    self.assertEqual(egr_tensor18.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor18.name)
    self.assertEqual(egr_tensor18.shape, egr_tensor4.shape)
    self.assertEqual(egr_tensor18.dtype, egr_tensor4.dtype)
    self.assertEqual(egr_tensor18.stop_gradient, True)
    self.assertTrue(egr_tensor18.place._equals(place))
    self.assertTrue(
        np.array_equal(egr_tensor18.numpy(), egr_tensor4.numpy()))

    # Case: source and place both positional.
    egr_tensor19 = core.eager.EagerTensor(
        egr_tensor4,
        place,
        name="new_eager_tensor", )
    self.assertEqual(egr_tensor19.persistable, True)
    self.assertTrue("new_eager_tensor" in egr_tensor19.name)
    self.assertEqual(egr_tensor19.shape, egr_tensor4.shape)
    self.assertEqual(egr_tensor19.dtype, egr_tensor4.dtype)
    self.assertEqual(egr_tensor19.stop_gradient, True)
    self.assertTrue(egr_tensor19.place._equals(place))
    self.assertTrue(
        np.array_equal(egr_tensor19.numpy(), egr_tensor4.numpy()))

    # init eager tensor by framework tensor
    x = np.random.rand(3, 3).astype('float32')
    t = paddle.fluid.Tensor()
    t.set(x, paddle.fluid.CPUPlace())

    # Case: framework Tensor via `value` kwarg only.
    egr_tensor20 = core.eager.EagerTensor(value=t)
    self.assertEqual(egr_tensor20.persistable, False)
    self.assertTrue("generated_tensor" in egr_tensor20.name)
    self.assertEqual(egr_tensor20.shape, [3, 3])
    self.assertEqual(egr_tensor20.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor20.stop_gradient, True)
    self.assertTrue(
        egr_tensor20.place._equals(
            paddle.fluid.framework._current_expected_place()))
    self.assertTrue(np.array_equal(egr_tensor20.numpy(), x))

    # Case: framework Tensor with target place as kwarg.
    egr_tensor21 = core.eager.EagerTensor(value=t, place=place)
    self.assertEqual(egr_tensor21.persistable, False)
    self.assertTrue("generated_tensor" in egr_tensor21.name)
    self.assertEqual(egr_tensor21.shape, [3, 3])
    self.assertEqual(egr_tensor21.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor21.stop_gradient, True)
    self.assertTrue(egr_tensor21.place._equals(place))
    self.assertTrue(np.array_equal(egr_tensor21.numpy(), x))

    # Case: framework Tensor positional, place as kwarg.
    egr_tensor22 = core.eager.EagerTensor(t, place=place)
    self.assertEqual(egr_tensor22.persistable, False)
    self.assertTrue("generated_tensor" in egr_tensor22.name)
    self.assertEqual(egr_tensor22.shape, [3, 3])
    self.assertEqual(egr_tensor22.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor22.stop_gradient, True)
    self.assertTrue(egr_tensor22.place._equals(place))
    self.assertTrue(np.array_equal(egr_tensor22.numpy(), x))

    # Case: framework Tensor and place positional, name as kwarg.
    egr_tensor23 = core.eager.EagerTensor(
        t, place, name="from_framework_tensor")
    self.assertEqual(egr_tensor23.persistable, False)
    self.assertTrue("from_framework_tensor" in egr_tensor23.name)
    self.assertEqual(egr_tensor23.shape, [3, 3])
    self.assertEqual(egr_tensor23.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor23.stop_gradient, True)
    self.assertTrue(egr_tensor23.place._equals(place))
    self.assertTrue(np.array_equal(egr_tensor23.numpy(), x))

    # Case: framework Tensor, everything as kwargs.
    egr_tensor24 = core.eager.EagerTensor(
        value=t, place=place, name="from_framework_tensor")
    self.assertEqual(egr_tensor24.persistable, False)
    self.assertTrue("from_framework_tensor" in egr_tensor24.name)
    self.assertEqual(egr_tensor24.shape, [3, 3])
    self.assertEqual(egr_tensor24.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor24.stop_gradient, True)
    self.assertTrue(egr_tensor24.place._equals(place))
    self.assertTrue(np.array_equal(egr_tensor24.numpy(), x))

    # Bad usage
    # SyntaxError: positional argument follows keyword argument
    # egr_tensor25 = core.eager.EagerTensor(value=t, place)
def test_constructor_with_kwargs(self):
    """Run the kwargs-constructor checks on CPU, and on GPU when available."""
    print("Test_constructor_with_kwargs")
    paddle.set_device("cpu")
    # GPU place is appended only for CUDA builds.
    candidate_places = [core.CPUPlace()] + (
        [core.CUDAPlace(0)] if core.is_compiled_with_cuda() else [])
    with _test_eager_guard():
        for target_place in candidate_places:
            self.constructor_with_kwargs(target_place)
def test_copy_and_copy_to(self):
print("Test_copy_and_copy_to")
with _test_eager_guard():
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册