未验证 提交 6f439e5a 编写于 作者: J Jiabin Yang 提交者: GitHub

Support multi place constructor (#38171)

* support more eager tensor api

* support multiple constructor for eager tensor

* add place related code

* polish code

* specific randint with dtype of int64

* Support pure cpu test

* refine test in pure cpu

* refine test in pure cpu
上级 b613c31e
......@@ -22,6 +22,7 @@
#include "paddle/pten/api/all.h"
#include "paddle/pten/api/lib/api_declare.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/core/convert_utils.h"
/**
* This class is used by Eager mode for now. It's painful to do this in Eager
* Mode, the better
......@@ -57,6 +58,7 @@ class EagerTensor final {
// Wrap an existing pten::TensorBase implementation; the legacy framework
// Variable member starts out empty (uninitialized).
explicit EagerTensor(const std::shared_ptr<pten::TensorBase>& tensor_impl)
    : tensor_(std::make_shared<paddle::experimental::Tensor>(tensor_impl)),
      var_(paddle::framework::Variable()) {}
// Copy/move are member-wise: the shared_ptr<Tensor> is shared/transferred,
// so neither performs a deep copy of the tensor data.
EagerTensor(const EagerTensor&) = default;
EagerTensor(EagerTensor&&) = default;
......@@ -163,6 +165,37 @@ class EagerTensor final {
*/
void reset() { tensor_->reset(); }
/**
* @brief Transfer the current Tensor to the specified device and return.
*
* @param place, the target place of which the tensor will copy to.
* @return Tensor
*/
// TODO(chenweihang): replace Backend by new Place
EagerTensor copy_to(pten::Backend backend, bool blocking) const {
  // If the legacy Variable still holds state, sync it into tensor_ first so
  // the copy reads current data. const_cast is required because SyncToTensor
  // mutates this logically-const object.
  if (Var().IsInitialized()) {
    const_cast<EagerTensor*>(this)->SyncToTensor();
  }
  return EagerTensor(tensor_->copy_to(backend, blocking));
}
/**
* @brief Transfer the source Tensor to current Tensor.
*
* @param src, the source Tensor to be copied.
* @param blocking, Should we copy this in sync way.
* @return void
*/
void copy_(const EagerTensor& src, const bool blocking) {
  // Sync Variable-held state into the pten tensor on BOTH sides before
  // copying (SyncToTensor mutates, hence the const_cast on src).
  if (src.Var().IsInitialized()) {
    const_cast<EagerTensor*>(&src)->SyncToTensor();
  }
  if (Var().IsInitialized()) {
    SyncToTensor();
  }
  // Delegate the actual data transfer to paddle::experimental::Tensor.
  tensor_->copy_(*(src.tensor_.get()), blocking);
}
/* Part 6: Operator overloading */
EagerTensor& operator=(const EagerTensor& x) & {
tensor_ = x.tensor_;
......@@ -270,6 +303,16 @@ class EagerTensor final {
}
private:
/**
* @description: Use a pten::Tensor pointer to construct a EagerTensor, never
* public this!!!!.
* @param {pten::Tensor} tensor
* @return {EagerTensor}
*/
explicit EagerTensor(const paddle::experimental::Tensor& tensor)
: tensor_(std::make_shared<paddle::experimental::Tensor>(tensor)),
var_(paddle::framework::Variable()) {}
std::shared_ptr<paddle::experimental::Tensor> tensor_ = nullptr;
paddle::framework::Variable var_;
};
......
......@@ -26,15 +26,21 @@ limitations under the License. */
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/include/core.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
#include "paddle/fluid/framework/python_headers.h"
#include "paddle/fluid/pybind/eager_op_function_impl.h"
#include "paddle/fluid/pybind/tensor_py.h"
#include "paddle/pten/api/lib/utils/storage.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
namespace paddle {
namespace pybind {
namespace py = ::pybind11;
PyTypeObject* p_eager_tensor_type;
extern PyTypeObject* g_vartype_pytype;
PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
PyObject* obj = type->tp_alloc(type, 0);
......@@ -45,6 +51,401 @@ PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
return obj;
}
// TODO(jiabin): Overload this once we need more constructor in Python
// Initialize `self` as an empty eager tensor with the given name, place,
// autograd flags, dtype and dims. Only LOD_TENSOR var_type installs a
// DenseTensor impl; other var types leave the tensor undefined.
void EmptyEagerTensorInitializer(
    EagerTensorObject* self, const std::string& name,
    const paddle::platform::Place& place, bool persistable = false,
    bool stop_gradient = true, framework::proto::VarType::Type dtype =
                                   paddle::framework::proto::VarType::FP32,
    const std::vector<int>& dims = {},
    framework::proto::VarType::Type var_type =
        paddle::framework::proto::VarType::LOD_TENSOR) {
  self->eager_tensor.set_name(name);
  // Record autograd attributes on the tensor's autograd meta.
  auto autograd_meta = egr::EagerUtils::autograd_meta(&(self->eager_tensor));
  autograd_meta->SetPersistable(persistable);
  autograd_meta->SetStopGradient(stop_gradient);
  if (var_type == paddle::framework::proto::VarType::LOD_TENSOR) {
    // TODO(jiabin): Maybe support LOD later
    // Build a DenseTensor whose SharedStorage is bound to `place`.
    std::shared_ptr<pten::DenseTensor> dense_tensor =
        std::make_shared<pten::DenseTensor>(
            pten::make_intrusive<paddle::experimental::SharedStorage>(place),
            pten::DenseTensorMeta(pten::TransToPtenDataType(dtype),
                                  paddle::framework::make_ddim(dims)));
    self->eager_tensor.set_impl(dense_tensor);
  }
}
// Fill an already-initialized eager tensor (see EmptyEagerTensorInitializer)
// with data from a numpy array. The tensor's place decides which
// SetTensorFromPyArray specialization performs the transfer.
void InitEagerTensorWithNumpyValue(EagerTensorObject* self,
                                   const py::object& array,
                                   bool zero_copy = false) {
  PADDLE_ENFORCE_EQ(
      self->eager_tensor.defined(), true,
      paddle::platform::errors::Fatal(
          "Calling InitEagerTensorWithNumpyValue of Eager Tensor without "
          "EmptyEagerTensorInitializer is "
          "forbidden. Please check your code and make sure you new a "
          "eager tensor before init it with NumPy."));
  pten::DenseTensor* impl_ptr =
      static_cast<pten::DenseTensor*>(self->eager_tensor.impl().get());
  // Target place was fixed when the empty tensor was created.
  paddle::platform::Place place = impl_ptr->place();
  // Copy (or, with zero_copy, share) the numpy buffer into a temporary
  // LoDTensor first; its storage is grafted onto the pten impl below.
  paddle::framework::LoDTensor temp_tensor = paddle::framework::LoDTensor();
  if (platform::is_cpu_place(place)) {
    SetTensorFromPyArray<platform::CPUPlace>(
        &temp_tensor, array, BOOST_GET_CONST(platform::CPUPlace, place),
        zero_copy);
  } else if (platform::is_xpu_place(place)) {
    SetTensorFromPyArray<platform::XPUPlace>(
        &temp_tensor, array, BOOST_GET_CONST(platform::XPUPlace, place),
        zero_copy);
  } else if (platform::is_gpu_place(place)) {
    SetTensorFromPyArray<platform::CUDAPlace>(
        &temp_tensor, array, BOOST_GET_CONST(platform::CUDAPlace, place),
        zero_copy);
  } else if (platform::is_cuda_pinned_place(place)) {
    SetTensorFromPyArray<platform::CUDAPinnedPlace>(
        &temp_tensor, array, BOOST_GET_CONST(platform::CUDAPinnedPlace, place),
        zero_copy);
  } else if (platform::is_npu_place(place)) {
    SetTensorFromPyArray<platform::NPUPlace>(
        &temp_tensor, array, BOOST_GET_CONST(platform::NPUPlace, place),
        zero_copy);
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Place should be one of "
        "CPUPlace/XPUPlace/CUDAPlace/CUDAPinnedPlace/NPUPlace"));
  }
  // Re-point the pten DenseTensor's meta/storage at the filled LoDTensor.
  paddle::experimental::ReMakePtenDenseTensor(temp_tensor, impl_ptr);
}
// Initialize `self` from another eager tensor. Same place: share the impl
// (shallow). Different place: blocking deep copy onto `place`.
void InitEagerTensorWithEagerTensor(EagerTensorObject* self,
                                    const egr::EagerTensor& src,
                                    const paddle::platform::Place& place,
                                    const std::string& name) {
  self->eager_tensor.set_name(name);
  if (place == src.place()) {
    // Shallow: both tensors now reference the same DenseTensor.
    auto impl = std::static_pointer_cast<pten::DenseTensor>(src.impl());
    self->eager_tensor.set_impl(impl);
    VLOG(4) << "Same place, do ShareDataWith";
  } else {
    self->eager_tensor.set_impl(
        src.copy_to(pten::TransToPtenBackend(place), true).impl());
    VLOG(4) << "Different place, do TensorCopy";
  }
  // The new tensor never requires grad; persistable follows src when src
  // already carries autograd meta, otherwise defaults to false.
  egr::EagerUtils::autograd_meta(&(self->eager_tensor))->SetStopGradient(true);
  if (src.get_autograd_meta()) {
    egr::EagerUtils::unsafe_autograd_meta(self->eager_tensor)
        ->SetPersistable(
            egr::EagerUtils::unsafe_autograd_meta(src)->Persistable());
  } else {
    egr::EagerUtils::unsafe_autograd_meta(self->eager_tensor)
        ->SetPersistable(false);
  }
}
// TODO(jiabin): We have to do some ugly work, refactor this method using
// PyArg_ParseTuple(),PyArg_ParseTupleAndKeywords() and PyArg_Parse() later to
// support kwargs.
// tp_init for the Python EagerTensor type. Dispatches on the number of
// positional arguments; kwargs are not supported yet (see TODO above).
// Returns 0 on success, throws (via PADDLE_THROW/ENFORCE) on bad input.
int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwds) {
  /** We should have init function with signature:
   * 1.
   * def __init__ ()
   * 2.
   * def __init__ (
   * ** dtype: paddle::framework::proto::VarType::Type,
   * ** dims: vector<int>,
   * ** name: std::string,
   * ** type: paddle::framework::proto::VarType::Type,
   * ** persistable: bool)
   * 3. (multi-place) (must have first 2 parameter)
   * def __init__ (
   * ** value: ndarray,
   * ** place: paddle::platform::Place,
   * ** persistable: bool,
   * ** zero_copy: bool,
   * ** name: std::string,
   * ** stop_gradient: bool)
   * 4.
   * def __init__ (
   * ** value: ndarray)
   * 5.
   * def __init__ (
   * ** tensor: EagerTensor)
   * 6. (multi-place) (must have first 2 parameter)
   * def __init__ (
   * ** tensor: EagerTensor,
   * ** place: paddle::platform::Place,
   * ** name: std::string)
   * **/
  PADDLE_ENFORCE_NOT_NULL(
      self, paddle::platform::errors::Fatal(
                "Calling __init__ of Eager Tensor without __new__ is "
                "forbidden. Please check your code and make sure you new a "
                "eager tensor before init it."));
  auto py_tensor_ptr = reinterpret_cast<EagerTensorObject*>(self);
  Py_ssize_t args_num = PyTuple_Size(args);
  switch (args_num) {
    case (Py_ssize_t)0: {
      // case 1: default-construct an empty tensor on the expected place.
      VLOG(6) << "Calling case1's initializer.";
      EmptyEagerTensorInitializer(
          py_tensor_ptr,
          egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
          egr::Controller::Instance().GetExpectedPlace());
      return 0;
    }
    case (Py_ssize_t)1: {
      // case 4 (ndarray) or case 5 (EagerTensor)
      PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
      if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
        VLOG(6) << "Calling case4's initializer.";
        PADDLE_ENFORCE_EQ(
            pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
            paddle::platform::errors::Fatal(
                "We expected initial parametes list like: \n **value: ndarray. "
                "But got value with wrong type: %s",
                reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
        py::object numpy_value = py::object(py::handle(arg0_ptr), true);
        EmptyEagerTensorInitializer(
            py_tensor_ptr,
            egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
            egr::Controller::Instance().GetExpectedPlace());
        InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value,
                                      /** zero copy **/ false);
        return 0;
      } else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast<PyObject*>(
                                                   p_eager_tensor_type))) {
        VLOG(6) << "Calling case5's initializer.";
        auto src_tensor = CastPyArg2EagerTensor(arg0_ptr, 0);
        InitEagerTensorWithEagerTensor(
            py_tensor_ptr, src_tensor,
            egr::Controller::Instance().GetExpectedPlace(),
            egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
        return 0;
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "We only support construct tensor from numpy value or tensor with "
            "python args by this initializer, "
            "please check your input first and make sure you are on the right "
            "way."));
      }
      return 0;
    }
    case (Py_ssize_t)2: {
      // case 3 truncated to (value, place) or case 6 truncated to
      // (tensor, place).
      PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
      if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
        VLOG(6) << "Calling case3's initializer.";
        PADDLE_ENFORCE_EQ(
            pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
            paddle::platform::errors::Fatal(
                "We expected initial parametes list like: \n **value: ndarray. "
                "But got value with wrong type: %s",
                reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
        py::object numpy_value = py::object(py::handle(arg0_ptr), true);
        paddle::platform::Place place =
            CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
        EmptyEagerTensorInitializer(
            py_tensor_ptr,
            egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
            place);
        InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value,
                                      /** zero copy **/ false);
        return 0;
      } else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast<PyObject*>(
                                                   p_eager_tensor_type))) {
        VLOG(6) << "Calling case6's initializer.";
        auto src_tensor = CastPyArg2EagerTensor(arg0_ptr, 0);
        paddle::platform::Place place =
            CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
        InitEagerTensorWithEagerTensor(
            py_tensor_ptr, src_tensor, place,
            egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
        return 0;
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "We only support construct tensor from numpy value or tensor with "
            "python args by this initializer, "
            "please check your input first and make sure you are on the right "
            "way."));
      }
      // Defensive: keeps control from falling into the next case label even
      // if the throw macro above is configured not to throw.
      return 0;
    }
    case (Py_ssize_t)3: {
      // case 3 truncated to (value, place, persistable) or case 6
      // (tensor, place, name).
      PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
      if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
        VLOG(6) << "Calling case3's initializer.";
        PADDLE_ENFORCE_EQ(
            pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
            paddle::platform::errors::Fatal(
                "We expected initial parametes list like: \n **value: ndarray. "
                "But got value with wrong type: %s",
                reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
        py::object numpy_value = py::object(py::handle(arg0_ptr), true);
        paddle::platform::Place place =
            CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
        bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
        EmptyEagerTensorInitializer(
            py_tensor_ptr,
            egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
            place, persistable);
        InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value,
                                      /** zero copy **/ false);
        return 0;
      } else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast<PyObject*>(
                                                   p_eager_tensor_type))) {
        VLOG(6) << "Calling case6's initializer.";
        auto src_tensor = CastPyArg2EagerTensor(arg0_ptr, 0);
        paddle::platform::Place place =
            CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
        std::string act_name = "";
        PyObject* name_obj = PyTuple_GET_ITEM(args, 2);
        if (name_obj == Py_None) {
          act_name = egr::Controller::Instance().GenerateUniqueName(
              "generated_tensor");
        } else {
          act_name = CastPyArg2AttrString(name_obj, 2);
        }
        InitEagerTensorWithEagerTensor(py_tensor_ptr, src_tensor, place,
                                       act_name);
        return 0;
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "We only support construct tensor from numpy value or tensor with "
            "python args by this initializer, "
            "please check your input first and make sure you are on the right "
            "way."));
      }
      // Defensive: see case 2.
      return 0;
    }
    case (Py_ssize_t)4: {
      // case 3 truncated to (value, place, persistable, zero_copy).
      VLOG(6) << "Calling case3's initializer.";
      PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
      PADDLE_ENFORCE_EQ(
          pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
          paddle::platform::errors::Fatal(
              "We expected initial parametes list like: \n **value: ndarray, "
              "\n ** place: paddle::platform::Place, \n ** persistable: bool, "
              "\n ** zero_copy: bool, \n ** name: std::string, \n ** "
              "stop_gradient: bool. But got value with wrong type: %s",
              reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
      py::object numpy_value =
          py::object(py::handle(PyTuple_GET_ITEM(args, 0)), true);
      paddle::platform::Place place =
          CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
      bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
      bool zero_copy = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
      EmptyEagerTensorInitializer(
          py_tensor_ptr,
          egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
          place, persistable);
      InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy);
      return 0;
    }
    case (Py_ssize_t)5: {
      // case 2 (dtype, dims, name, type, persistable) or case 3 truncated
      // to (value, place, persistable, zero_copy, name).
      PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
      if (PyObject_IsInstance(arg0_ptr,
                              reinterpret_cast<PyObject*>(g_vartype_pytype))) {
        VLOG(6) << "Calling case2's initializer.";
        paddle::framework::proto::VarType::Type dtype =
            CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 0), 0);
        std::vector<int> dims =
            CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 1), 1);
        std::string act_name = "";
        PyObject* name_obj = PyTuple_GET_ITEM(args, 2);
        if (name_obj == Py_None) {
          act_name = egr::Controller::Instance().GenerateUniqueName(
              "generated_tensor");
        } else {
          act_name = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 2), 2);
        }
        paddle::framework::proto::VarType::Type var_type =
            CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 3), 3);
        bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
        EmptyEagerTensorInitializer(
            py_tensor_ptr, act_name,
            egr::Controller::Instance().GetExpectedPlace(), persistable, true,
            dtype, dims, var_type);
        return 0;
      } else if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
        // BUGFIX: this branch previously tested for an EagerTensor instance
        // while the enforce below (and the parsing) requires a numpy array,
        // so it could never succeed. It now matches cases 3/4: a 5-arg
        // numpy-value constructor.
        PADDLE_ENFORCE_EQ(
            pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
            paddle::platform::errors::Fatal(
                "We expected initial parametes list like: \n **value: ndarray, "
                "\n ** place: paddle::platform::Place, \n ** persistable: "
                "bool, \n ** zero_copy: bool, \n ** name: std::string, \n ** "
                "stop_gradient: bool. But got value with wrong type: %s",
                reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
        py::object numpy_value =
            py::object(py::handle(PyTuple_GET_ITEM(args, 0)), true);
        paddle::platform::Place place =
            CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
        bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
        bool zero_copy = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
        std::string act_name = "";
        PyObject* name_obj = PyTuple_GET_ITEM(args, 4);
        if (name_obj == Py_None) {
          act_name = egr::Controller::Instance().GenerateUniqueName(
              "generated_tensor");
        } else {
          act_name = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 4), 4);
        }
        EmptyEagerTensorInitializer(py_tensor_ptr, act_name, place,
                                    persistable);
        InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy);
        return 0;
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "We only support construct tensor from numpy value or dtype with "
            "python args by this initializer, "
            "please check your input first and make sure you are on the right "
            "way."));
      }
      return 0;
    }
    case (Py_ssize_t)6: {
      // case 3, fully specified.
      VLOG(6) << "Calling case3's initializer.";
      PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
      PADDLE_ENFORCE_EQ(
          pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr), true,
          paddle::platform::errors::Fatal(
              "We expected initial parametes list like: \n **value: ndarray, "
              "\n ** place: paddle::platform::Place, \n ** persistable: bool, "
              "\n ** zero_copy: bool, \n ** name: std::string, \n ** "
              "stop_gradient: bool. But got value with wrong type: %s",
              reinterpret_cast<PyTypeObject*>(arg0_ptr->ob_type)->tp_name));
      py::object numpy_value =
          py::object(py::handle(PyTuple_GET_ITEM(args, 0)), true);
      paddle::platform::Place place =
          CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
      bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
      bool zero_copy = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
      std::string act_name = "";
      PyObject* name_obj = PyTuple_GET_ITEM(args, 4);
      if (name_obj == Py_None) {
        act_name =
            egr::Controller::Instance().GenerateUniqueName("generated_tensor");
      } else {
        act_name = CastPyArg2AttrString(name_obj, 4);
      }
      bool stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 5), 5);
      EmptyEagerTensorInitializer(py_tensor_ptr, act_name, place, persistable,
                                  stop_gradient);
      InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy);
      return 0;
    }
    default: {
      // BUGFIX: message previously read "Can't not find" (double negative)
      // and "make sure u call".
      PADDLE_THROW(platform::errors::Fatal(
          "Can not find expected num of args, please check your call, and "
          "make sure you call the existed constructor."));
      return 1;
    }
  }
}
static void eagertensor_dealloc(EagerTensorObject* self) {
self->eager_tensor.~EagerTensor();
Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
......@@ -90,7 +491,7 @@ PyTypeObject eager_tensor_type = {
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
EagerTensorInit, /* tp_init */
0, /* tp_alloc */
EagerTensorNew, /* tp_new */
0, /* tp_free */
......
......@@ -19,6 +19,7 @@ limitations under the License. */
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
......@@ -89,19 +90,57 @@ static PyObject* eager_tensor_method_numpy(EagerTensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method_is_initialized(EagerTensorObject* self,
PyObject* args,
PyObject* kwargs) {
// Pybind method: tensor._is_initialized() -> bool; forwards
// EagerTensor::initialized() to Python.
static PyObject* eager_tensor_method__is_initialized(EagerTensorObject* self,
                                                     PyObject* args,
                                                     PyObject* kwargs) {
  EAGER_SYNC_TRY
  return ToPyObject(self->eager_tensor.initialized());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Pybind method: tensor._copy_to(blocking, place) -> copied tensor on
// `place`. The copy always has stop_gradient == True and inherits the
// source tensor's persistable flag.
static PyObject* eager_tensor_method__copy_to(EagerTensorObject* self,
                                              PyObject* args,
                                              PyObject* kwargs) {
  EAGER_SYNC_TRY
  // Positional args: 0 -> blocking flag, 1 -> destination place.
  const bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
  const auto dst_place = CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
  auto copied = self->eager_tensor.copy_to(pten::TransToPtenBackend(dst_place),
                                           blocking);
  auto* copied_meta = egr::EagerUtils::autograd_meta(&copied);
  copied_meta->SetStopGradient(true);
  copied_meta->SetPersistable(
      egr::EagerUtils::autograd_meta(&(self->eager_tensor))->Persistable());
  return ToPyObject(copied);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Pybind method: tensor.copy_(src, blocking). In-place copy of `src` into
// `self`; stop_gradient and persistable are taken over from `src`.
// Returns None.
static PyObject* eager_tensor_method_copy_(EagerTensorObject* self,
                                           PyObject* args, PyObject* kwargs) {
  EAGER_SYNC_TRY
  // Positional args: 0 -> source tensor, 1 -> blocking flag.
  egr::EagerTensor source = CastPyArg2EagerTensor(PyTuple_GET_ITEM(args, 0), 0);
  const bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 1), 1);
  self->eager_tensor.copy_(source, blocking);
  auto* self_meta = egr::EagerUtils::autograd_meta(&(self->eager_tensor));
  auto* source_meta = egr::EagerUtils::autograd_meta(&source);
  self_meta->SetStopGradient(source_meta->StopGradient());
  self_meta->SetPersistable(source_meta->Persistable());
  Py_INCREF(Py_None);
  return Py_None;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Method table for the Python EagerTensor type.
// BUGFIX: the "_is_initialized" entry contained two function-pointer lines —
// the pre-rename `eager_tensor_method_is_initialized` alongside the renamed
// `eager_tensor_method__is_initialized` — which is an invalid initializer;
// the stale line is removed.
PyMethodDef variable_methods[] = {
    {"numpy", (PyCFunction)(void (*)(void))eager_tensor_method_numpy,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"_is_initialized",
     (PyCFunction)(void (*)(void))eager_tensor_method__is_initialized,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"_copy_to", (PyCFunction)(void (*)(void))eager_tensor_method__copy_to,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"copy_", (PyCFunction)(void (*)(void))eager_tensor_method_copy_,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {NULL, NULL, 0, NULL}};  // sentinel terminating the table
......
......@@ -213,6 +213,47 @@ std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
item, reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
result.emplace_back(
reinterpret_cast<EagerTensorObject*>(item)->eager_tensor);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be "
"list of EagerTensor, but got %s at pos %d",
arg_pos + 1,
reinterpret_cast<PyTypeObject*>(item->ob_type)->tp_name, i));
}
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be "
"list or tuple, but got %s",
arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
}
return result;
}
std::vector<int> CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos) {
std::vector<int> result;
if (PyList_Check(obj)) {
Py_ssize_t len = PyList_Size(obj);
PyObject* item = nullptr;
for (Py_ssize_t i = 0; i < len; i++) {
item = PyList_GetItem(obj, i);
if (PyObject_CheckLongOrConvertToLong(&item)) {
result.emplace_back(static_cast<int>(PyLong_AsLong(item)));
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be "
"list of int, but got %s at pos %d",
arg_pos + 1,
reinterpret_cast<PyTypeObject*>(item->ob_type)->tp_name, i));
}
}
} else if (PyTuple_Check(obj)) {
Py_ssize_t len = PyTuple_Size(obj);
PyObject* item = nullptr;
for (Py_ssize_t i = 0; i < len; i++) {
item = PyTuple_GetItem(obj, i);
if (PyObject_CheckLongOrConvertToLong(&item)) {
result.emplace_back(static_cast<int>(PyLong_AsLong(item)));
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be "
......
......@@ -35,6 +35,7 @@ egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos);
std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
ssize_t arg_pos);
platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos);
std::vector<int> CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos);
framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
ssize_t arg_pos);
PyObject* ToPyObject(int value);
......
......@@ -341,7 +341,9 @@ class PADDLE_API Tensor final {
void set_name(const std::string& name) { name_ = name; }
/* Part 5: Data Transform methods */
/* Alert!!!!: All copy method can only deep copy impl, autograd info only be
* copied */
/* out of pten */
/**
* @brief Copy the current Tensor data to the specified device
* and return the new Tensor. It's usually used to set the input tensor data.
......@@ -361,12 +363,20 @@ class PADDLE_API Tensor final {
/**
* @brief Transfer the current Tensor to the specified device and return.
*
* @param place, the target place of which the tensor will copy to.
* @param backend, The target backend of which the tensor will copy to.
* @param blocking, Should we copy this in sync way.
* @return Tensor
*/
// TODO(chenweihang): replace Backend by new Place
Tensor copy_to(Backend backend, bool blocking) const;
/**
* @brief Transfer the source Tensor to current Tensor.
*
* @param src, the source Tensor to be copied.
* @param blocking, Should we copy this in sync way.
* @return void
*/
void copy_(const Tensor& src, const bool blocking);
/**
* @brief Cast datatype from one to another
*
......
......@@ -24,6 +24,7 @@ limitations under the License. */
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/api/lib/utils/storage.h"
#include "paddle/pten/core/compat_utils.h"
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/tensor_base.h"
#include "paddle/pten/core/tensor_meta.h"
......@@ -321,6 +322,31 @@ Tensor Tensor::copy_to(Backend backend, bool blocking) const {
return experimental::copy_to(*this, backend, blocking);
}
// In-place copy: replace this tensor's impl with a copy of `src`'s data.
// No-op when `src` holds no data. When *this is already defined, dtype and
// impl type must match — copy_ performs no implicit casting.
void Tensor::copy_(const Tensor &src, bool blocking) {
  if (!src.is_initialized()) {
    return;
  }
  VLOG(3) << "Deep copy Tensor from " << src.name() << " to " << name();
  if (defined()) {
    PADDLE_ENFORCE_EQ(dtype(),
                      src.dtype(),
                      platform::errors::PreconditionNotMet(
                          "Tensor %s has different data type with Tensor %s, "
                          "Tensor Copy cannot be performed!",
                          name(),
                          src.name()));
    PADDLE_ENFORCE_EQ(impl()->type_info().id(),
                      src.impl()->type_info().id(),
                      platform::errors::PreconditionNotMet(
                          "Tensor %s has different type with Tensor %s, Tensor "
                          "Copy cannot be performed!",
                          name(),
                          src.name()));
  }
  // Copy src on src's own place and adopt the resulting impl.
  // NOTE(review): the result lives on src's place, not this tensor's
  // previous place — confirm this is the intended semantics.
  auto copy_tensor =
      src.copy_to(pten::TransToPtenBackend(src.inner_place()), blocking);
  set_impl(copy_tensor.impl());
}
// Out-of-place dtype conversion; delegates to experimental::cast.
Tensor Tensor::cast(DataType target_type) const {
  return experimental::cast(*this, target_type);
}
......
......@@ -389,6 +389,28 @@ void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
}
}
// Rebuild `dst`'s meta (dims/dtype/layout) from LoDTensor `src`, then point
// dst's SharedStorage at src's existing allocation — no data copy.
void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
                           pten::DenseTensor* dst) {
  auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
  meta->dims = src.dims();
  // Since the type of DenseTensorMeta is const, const_cast must be used
  const_cast<DataType&>(meta->dtype) = pten::TransToPtenDataType(src.type());
  // Since the type of DenseTensorMeta is const, const_cast must be used
  const_cast<DataLayout&>(meta->layout) =
      pten::TransToPtenDataLayout(src.layout());
  auto* shared_storage = static_cast<SharedStorage*>(
      pten::CompatibleDenseTensorUtils::UnsafeGetMutableStorage(dst));
  PADDLE_ENFORCE_NOT_NULL(
      shared_storage,
      platform::errors::NotFound(
          "Target DenseTensor's shared storage is nullptr."));
  // An uninitialized src has no Holder; leave dst's storage untouched then.
  if (src.IsInitialized()) {
    shared_storage->ResetAllocation(src.Holder(), src.offset());
  }
}
void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
const pten::TensorArgDef& arg_def,
pten::DenseTensor* dst) {
......
......@@ -72,6 +72,8 @@ void MovesSharedStorage(pten::DenseTensor* src,
* the overhead caused by frequent construction and destruction of the
* DenseTensor.
*/
void ReMakePtenDenseTensor(const paddle::framework::LoDTensor& src,
pten::DenseTensor* dst);
void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
const pten::TensorArgDef& arg_def,
......
......@@ -81,6 +81,173 @@ class EagerDtypeTestCase(unittest.TestCase):
class EagerTensorPropertiesTestCase(unittest.TestCase):
def constructor(self, place):
    # Exercises every EagerTensor __init__ overload (cases 1-6 of
    # EagerTensorInit) on the given place.

    # Case 1: no args -> empty FP32 tensor with a generated name.
    egr_tensor = core.eager.EagerTensor()
    self.assertEqual(egr_tensor.persistable, False)
    self.assertTrue("generated" in egr_tensor.name)
    self.assertEqual(egr_tensor.shape, [])
    self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor.stop_gradient, True)

    # Case 2: (dtype, dims, name, type, persistable).
    egr_tensor0 = core.eager.EagerTensor(
        core.VarDesc.VarType.FP32, [4, 16, 16, 32], "test_eager_tensor",
        core.VarDesc.VarType.LOD_TENSOR, True)
    self.assertEqual(egr_tensor0.persistable, True)
    self.assertEqual(egr_tensor0.name, "test_eager_tensor")
    self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
    self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)

    # Case 3, 6 args: (value, place, persistable, zero_copy, name,
    # stop_gradient).
    arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
    egr_tensor1 = core.eager.EagerTensor(arr0, place, True, False,
                                         "numpy_tensor1", False)
    self.assertEqual(egr_tensor1.persistable, True)
    self.assertEqual(egr_tensor1.name, "numpy_tensor1")
    self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
    self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor1.stop_gradient, False)
    self.assertTrue(egr_tensor1.place._equals(place))
    self.assertTrue(np.array_equal(egr_tensor1.numpy(), arr0))

    # Same overload with int64 input: dtype must be preserved.
    arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64)
    egr_tensor2 = core.eager.EagerTensor(arr1, place, False, True,
                                         "numpy_tensor2", True)
    self.assertEqual(egr_tensor2.persistable, False)
    self.assertEqual(egr_tensor2.name, "numpy_tensor2")
    self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
    self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.INT64)
    self.assertEqual(egr_tensor2.stop_gradient, True)
    self.assertTrue(egr_tensor2.place._equals(place))
    self.assertTrue(np.array_equal(egr_tensor2.numpy(), arr1))

    # Case 4: ndarray only -> lands on the current expected place.
    arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32')
    egr_tensor3 = core.eager.EagerTensor(arr2)
    self.assertEqual(egr_tensor3.persistable, False)
    self.assertTrue("generated_tensor" in egr_tensor3.name)
    self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64])
    self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor3.stop_gradient, True)
    self.assertTrue(
        egr_tensor3.place._equals(
            paddle.fluid.framework._current_expected_place()))
    self.assertTrue(np.array_equal(egr_tensor3.numpy(), arr2))

    # Case 5: construct from another EagerTensor; note stop_gradient is
    # reset to True on the new tensor even though the source has False.
    egr_tensor3.stop_gradient = False
    egr_tensor4 = core.eager.EagerTensor(egr_tensor3)
    self.assertEqual(egr_tensor4.persistable, False)
    self.assertTrue("generated_tensor" in egr_tensor4.name)
    self.assertEqual(egr_tensor4.shape, egr_tensor3.shape)
    self.assertEqual(egr_tensor4.dtype, egr_tensor3.dtype)
    self.assertEqual(egr_tensor4.stop_gradient, True)
    self.assertTrue(
        egr_tensor4.place._equals(
            paddle.fluid.framework._current_expected_place()))
    self.assertTrue(
        np.array_equal(egr_tensor4.numpy(), egr_tensor3.numpy()))

    # Case 3 truncated to (value, place).
    arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
    egr_tensor5 = core.eager.EagerTensor(arr4, place)
    self.assertEqual(egr_tensor5.persistable, False)
    self.assertTrue("generated_tensor" in egr_tensor5.name)
    self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
    self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor5.stop_gradient, True)
    self.assertTrue(egr_tensor5.place._equals(place))
    self.assertTrue(np.array_equal(egr_tensor5.numpy(), arr4))

    # Case 6 truncated to (tensor, place): copy onto CPU.
    egr_tensor6 = core.eager.EagerTensor(egr_tensor5, core.CPUPlace())
    self.assertEqual(egr_tensor6.persistable, False)
    self.assertTrue("generated_tensor" in egr_tensor6.name)
    self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
    self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor6.stop_gradient, True)
    self.assertEqual(egr_tensor6.place.is_cpu_place(), True)
    self.assertTrue(
        np.array_equal(egr_tensor6.numpy(), egr_tensor5.numpy()))

    # Case 3 truncated to (value, place, persistable).
    egr_tensor7 = core.eager.EagerTensor(arr4, place, True)
    self.assertEqual(egr_tensor7.persistable, True)
    self.assertTrue("generated_tensor" in egr_tensor7.name)
    self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
    self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor7.stop_gradient, True)
    self.assertTrue(egr_tensor7.place._equals(place))
    self.assertTrue(np.array_equal(egr_tensor7.numpy(), arr4))

    # Case 6: (tensor, place, name) with an explicit name.
    egr_tensor8 = core.eager.EagerTensor(egr_tensor6, place, "egr_tensor8")
    self.assertEqual(egr_tensor8.persistable, False)
    self.assertEqual(egr_tensor8.name, "egr_tensor8")
    self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
    self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor8.stop_gradient, True)
    self.assertTrue(egr_tensor8.place._equals(place))
    self.assertTrue(
        np.array_equal(egr_tensor8.numpy(), egr_tensor5.numpy()))

    # Case 3 truncated to (value, place, persistable, zero_copy).
    egr_tensor9 = core.eager.EagerTensor(arr4, place, True, True)
    self.assertEqual(egr_tensor9.persistable, True)
    self.assertTrue("generated_tensor" in egr_tensor9.name)
    self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
    self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
    self.assertEqual(egr_tensor9.stop_gradient, True)
    self.assertTrue(egr_tensor9.place._equals(place))
    self.assertTrue(np.array_equal(egr_tensor9.numpy(), arr4))
def test_constructor(self):
    # Run the constructor checks on every available place: CPU always,
    # plus GPU 0 when this build was compiled with CUDA.
    print("Test_constructor")
    paddle.set_device("cpu")
    places = [core.CPUPlace()]
    if core.is_compiled_with_cuda():
        places.append(core.CUDAPlace(0))
    with _test_eager_guard():
        for target_place in places:
            self.constructor(target_place)
def test_copy_and_copy_to(self):
    # Verifies EagerTensor.copy_ (in-place, adopts src's shape/dtype/data
    # and persistable) and _copy_to (returns a copy with stop_gradient=True
    # and inherited persistable).
    print("Test_copy_and_copy_to")
    with _test_eager_guard():
        paddle.set_device("cpu")
        arr = np.ones([4, 16, 16, 32]).astype('float32')
        arr1 = np.zeros([4, 16]).astype('float32')
        arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
            [4, 16, 16, 32]).astype('float32')
        tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
                                  core.CPUPlace())
        self.assertEqual(tensor.stop_gradient, True)
        tensor.stop_gradient = False
        print("Set persistable")
        tensor.persistable = False
        tensor1 = paddle.to_tensor(arr1, core.VarDesc.VarType.FP32,
                                   core.CPUPlace())
        tensor1.persistable = True
        self.assertEqual(tensor1.stop_gradient, True)
        self.assertTrue(np.array_equal(tensor.numpy(), arr))
        print("Test copy_")
        tensor.copy_(tensor1, True)
        self.assertEqual(tensor.persistable, True)
        self.assertEqual(tensor.shape, [4, 16])
        self.assertEqual(tensor.dtype, core.VarDesc.VarType.FP32)
        self.assertTrue(np.array_equal(tensor.numpy(), arr1))
        print("Test _copy_to")
        tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
                                   core.CPUPlace())
        self.assertTrue(np.array_equal(tensor2.numpy(), arr2))
        self.assertTrue(tensor2.place.is_cpu_place())
        tensor2.persistable = True
        tensor2.stop_gradient = False
        if core.is_compiled_with_cuda():
            tensor3 = tensor2._copy_to(True, core.CUDAPlace(0))
            self.assertTrue(np.array_equal(tensor3.numpy(), arr2))
            # BUGFIX: was assertTrue(x, True) — the second argument of
            # assertTrue is the failure *message*, so nothing was compared.
            self.assertEqual(tensor3.persistable, True)
            self.assertEqual(tensor3.stop_gradient, True)
            self.assertTrue(tensor3.place.is_gpu_place())
        else:
            tensor3 = tensor2._copy_to(True, core.CPUPlace())
            self.assertTrue(np.array_equal(tensor3.numpy(), arr2))
            # BUGFIX: same assertTrue(x, True) misuse as above.
            self.assertEqual(tensor3.persistable, True)
            self.assertEqual(tensor3.stop_gradient, True)
            self.assertTrue(tensor3.place.is_cpu_place())
def test_properties(self):
print("Test_properties")
with _test_eager_guard():
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册