Unverified · Commit 5d6d14bc authored by wanghuancoder, committed by GitHub

[Eager] fix test_var_base (#41397)

* eager test var base

* refine, test=develop
Parent afb56e8c
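For orientation, a minimal Python sketch of the surface this commit touches (a hedged illustration, not the actual test; assumes a PaddlePaddle build with eager mode active):

import paddle

t = paddle.to_tensor([1.0, 2.0, 3.0], dtype='float32')
print(t.element_size())   # 4: bytes per float32 element (new binding below)
print(t.is_sparse())      # False, and now safe even for undefined tensors
print(t.shape)            # [3]; an undefined tensor would report []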
@@ -78,6 +78,10 @@ void EmptyTensorInitializer(TensorObject* self, const std::string& name,
         phi::DenseTensorMeta(paddle::framework::TransToPhiDataType(dtype),
                              ddims));
     self->tensor.set_impl(dense_tensor);
+  } else if (var_type == paddle::framework::proto::VarType::SELECTED_ROWS) {
+    std::shared_ptr<phi::SelectedRows> tensor =
+        std::make_shared<phi::SelectedRows>();
+    self->tensor.set_impl(tensor);
   }
   if (!autograd_meta->GetMutableGradNode()) {
@@ -465,6 +465,9 @@ static PyObject* tensor__share_buffer_to(TensorObject* self, PyObject* args,
                         self->tensor.name()));
   auto* src_tensor =
       static_cast<paddle::framework::Tensor*>(self->tensor.impl().get());
+  if (!dst_ptr->defined()) {
+    dst_ptr->set_impl(std::make_shared<phi::DenseTensor>());
+  }
   auto dst_tensor =
       static_cast<paddle::framework::Tensor*>(dst_ptr->impl().get());
   dst_tensor->ShareDataWith(*src_tensor);
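The guard above matters when the destination was constructed empty. A hedged sketch of the intended Python-level effect (`_share_buffer_to` and `_is_shared_buffer_with` are the internal hooks test_var_base exercises; assumes eager mode):

import paddle

src = paddle.to_tensor([1.0, 2.0])
dst = paddle.Tensor()            # no impl allocated yet: defined() is false
src._share_buffer_to(dst)        # a DenseTensor impl is now created on demand
assert dst._is_shared_buffer_with(src)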
@@ -565,6 +568,10 @@ static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
                                                     PyObject* args,
                                                     PyObject* kwargs) {
   EAGER_TRY
+  if (!self->tensor.defined()) {
+    Py_IncRef(Py_None);
+    return Py_None;
+  }
   if (self->tensor.is_dense_tensor()) {
     auto* tensor =
         static_cast<paddle::framework::LoDTensor*>(self->tensor.impl().get());
@@ -577,6 +584,25 @@ static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+static PyObject* tensor_method_get_underline_selected_rows(TensorObject* self,
+                                                           PyObject* args,
+                                                           PyObject* kwargs) {
+  EAGER_TRY
+  if (!self->tensor.defined()) {
+    Py_IncRef(Py_None);
+    return Py_None;
+  }
+  if (self->tensor.is_selected_rows()) {
+    auto* selected_rows =
+        static_cast<phi::SelectedRows*>(self->tensor.impl().get());
+    return ToPyObject(selected_rows);
+  } else {
+    Py_IncRef(Py_None);
+    return Py_None;
+  }
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
                                                   PyObject* args,
                                                   PyObject* kwargs) {
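Only a tensor that actually holds a phi::SelectedRows is handed back; everything else, including an undefined tensor, yields None. A hedged sketch of the dense-tensor path, which is the one this diff pins down (assumes eager mode):

import paddle

t = paddle.to_tensor([1.0, 2.0])          # dense, so no SelectedRows inside
assert t.is_selected_rows() is False
assert t.get_selected_rows() is None      # Py_None is returned, not an error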
@@ -1214,6 +1240,9 @@ static PyObject* tensor_method_get_non_zero_cols(TensorObject* self,
 static PyObject* tensor_method_is_sparse(TensorObject* self, PyObject* args,
                                          PyObject* kwargs) {
   EAGER_TRY
+  if (!self->tensor.defined()) {
+    return ToPyObject(false);
+  }
   return ToPyObject(self->tensor.is_sparse_coo_tensor() ||
                     self->tensor.is_sparse_csr_tensor());
   EAGER_CATCH_AND_THROW_RETURN_NULL
@@ -1222,6 +1251,9 @@ static PyObject* tensor_method_is_sparse(TensorObject* self, PyObject* args,
 static PyObject* tensor_method_is_sparse_coo(TensorObject* self, PyObject* args,
                                              PyObject* kwargs) {
   EAGER_TRY
+  if (!self->tensor.defined()) {
+    return ToPyObject(false);
+  }
   return ToPyObject(self->tensor.is_sparse_coo_tensor());
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1229,6 +1261,9 @@ static PyObject* tensor_method_is_sparse_coo(TensorObject* self, PyObject* args,
 static PyObject* tensor_method_is_sparse_csr(TensorObject* self, PyObject* args,
                                              PyObject* kwargs) {
   EAGER_TRY
+  if (!self->tensor.defined()) {
+    return ToPyObject(false);
+  }
   return ToPyObject(self->tensor.is_sparse_csr_tensor());
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -1307,6 +1342,9 @@ static PyObject* tensor_method_is_selected_rows(TensorObject* self,
                                                 PyObject* args,
                                                 PyObject* kwargs) {
   EAGER_TRY
+  if (!self->tensor.defined()) {
+    return ToPyObject(false);
+  }
   return ToPyObject(self->tensor.is_selected_rows());
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
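Taken together, these four guards make the `is_*` predicates total: an undefined tensor now answers False everywhere instead of dereferencing a null impl. A hedged sketch (assumes `paddle.Tensor()` yields an undefined eager tensor, as in test_var_base):

import paddle

u = paddle.Tensor()                 # nothing allocated: defined() is false
assert u.is_sparse() is False
assert u.is_sparse_coo() is False
assert u.is_sparse_csr() is False
assert u.is_selected_rows() is False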
@@ -1323,6 +1361,13 @@ static PyObject* tensor_method_get_rows(TensorObject* self, PyObject* args,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+static PyObject* tensor_methon_element_size(TensorObject* self, PyObject* args,
+                                            PyObject* kwargs) {
+  EAGER_TRY
+  return ToPyObject(paddle::experimental::SizeOf(self->tensor.dtype()));
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 static PyObject* tensor__reset_grad_inplace_version(TensorObject* self,
                                                     PyObject* args,
                                                     PyObject* kwargs) {
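The new binding reports the byte width of one element of the tensor's dtype via `paddle::experimental::SizeOf`; a registration further down maps it to the Python-level `element_size` method (the C symbol's `methon` spelling is as committed). A hedged usage sketch:

import paddle

# element_size() == bytes per element of the dtype
assert paddle.to_tensor([1], dtype='bool').element_size() == 1
assert paddle.to_tensor([1], dtype='float16').element_size() == 2
assert paddle.to_tensor([1], dtype='float32').element_size() == 4
assert paddle.to_tensor([1], dtype='float64').element_size() == 8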
@@ -1420,6 +1465,9 @@ PyMethodDef variable_methods[] = {
     {"get_tensor",
      (PyCFunction)(void (*)(void))tensor_method_get_underline_tensor,
      METH_VARARGS | METH_KEYWORDS, NULL},
+    {"get_selected_rows",
+     (PyCFunction)(void (*)(void))tensor_method_get_underline_selected_rows,
+     METH_VARARGS | METH_KEYWORDS, NULL},
     {"_getitem_index_not_tensor",
      (PyCFunction)(void (*)(void))tensor__getitem_index_not_tensor,
      METH_VARARGS | METH_KEYWORDS, NULL},
@@ -1482,6 +1530,8 @@ PyMethodDef variable_methods[] = {
      METH_VARARGS | METH_KEYWORDS, NULL},
     {"rows", (PyCFunction)(void (*)(void))tensor_method_get_rows,
      METH_VARARGS | METH_KEYWORDS, NULL},
+    {"element_size", (PyCFunction)(void (*)(void))tensor_methon_element_size,
+     METH_VARARGS | METH_KEYWORDS, NULL},
     {"_reset_grad_inplace_version",
      (PyCFunction)(void (*)(void))tensor__reset_grad_inplace_version,
      METH_VARARGS | METH_KEYWORDS, NULL},
@@ -43,8 +43,14 @@ PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
 PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
   EAGER_TRY
+  if (!self->tensor.defined()) {
+    // keep consistent with the old dygraph behavior
+    return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR);
+  }
   if (self->tensor.is_dense_tensor()) {
     return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR);
+  } else if (self->tensor.is_selected_rows()) {
+    return ToPyObject(paddle::framework::proto::VarType::SELECTED_ROWS);
   } else {
     Py_INCREF(Py_None);
     return Py_None;
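With this guard the `type` property matches old-dygraph semantics: undefined and dense tensors both report LOD_TENSOR, selected-rows tensors report SELECTED_ROWS. A hedged sketch:

import paddle
from paddle.fluid import core

t = paddle.to_tensor([1.0])
assert t.type == core.VarDesc.VarType.LOD_TENSOR  # dense tensor
u = paddle.Tensor()                               # undefined tensor defaults
assert u.type == core.VarDesc.VarType.LOD_TENSOR  # to LOD_TENSOR as well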
@@ -137,8 +143,11 @@ int tensor_properties_set_persistable(TensorObject* self, PyObject* value,
 PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) {
   EAGER_TRY
-  auto ddim = self->tensor.shape();
   std::vector<int64_t> value;
+  if (!self->tensor.defined()) {
+    return ToPyObject(value);
+  }
+  auto ddim = self->tensor.shape();
   size_t rank = static_cast<size_t>(ddim.size());
   value.resize(rank);
   for (size_t i = 0; i < rank; i++) {
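Moving the `value` declaration above the guard lets the property short-circuit: for an undefined tensor the still-empty vector is returned before `shape()` can touch a null impl. A hedged sketch:

import paddle

u = paddle.Tensor()       # undefined eager tensor
assert u.shape == []      # empty list rather than a crash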
@@ -165,6 +174,10 @@ PyObject* tensor_properties_get_place_str(TensorObject* self, void* closure) {
 PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
   EAGER_TRY
+  if (!self->tensor.defined()) {
+    // keep consistent with the old dygraph behavior
+    return ToPyObject(framework::proto::VarType::FP32);
+  }
   return ToPyObject(
       paddle::framework::TransToProtoVarType(self->tensor.type()));
   EAGER_CATCH_AND_THROW_RETURN_NULL
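The `dtype` property gets the same treatment: undefined tensors default to FP32, mirroring old dygraph. A hedged sketch:

import paddle

u = paddle.Tensor()
assert u.dtype == paddle.float32  # default dtype for an undefined tensor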
@@ -577,6 +577,12 @@ PyObject* ToPyObject(const paddle::framework::LoDTensor* value) {
   return obj.ptr();
 }
 
+PyObject* ToPyObject(const phi::SelectedRows* value) {
+  auto obj = ::pybind11::cast(value, py::return_value_policy::reference);
+  obj.inc_ref();
+  return obj.ptr();
+}
+
 PyObject* ToPyObject(const void* value) {
   if (value == nullptr) {
     Py_INCREF(Py_None);
@@ -75,6 +75,7 @@ PyObject* ToPyObject(const std::vector<paddle::experimental::Tensor>& value,
                      bool return_py_none_if_not_initialize = false);
 PyObject* ToPyObject(const platform::Place& value);
 PyObject* ToPyObject(const framework::LoDTensor* value);
+PyObject* ToPyObject(const phi::SelectedRows* value);
 PyObject* ToPyObject(const paddle::framework::proto::VarType::Type& dtype);
 PyObject* ToPyObject(const paddle::framework::proto::VarType& type);
 PyObject* ToPyObject(const void* value);
@@ -101,7 +101,11 @@ int64_t Tensor::size() const { return impl_->numel(); }
 phi::DDim Tensor::dims() const { return impl_->dims(); }
 
 std::vector<int64_t> Tensor::shape() const {
-  return phi::vectorize<int64_t>(impl_->dims());
+  auto dims = impl_->dims();
+  if (dims.size() == 1 && dims.at(0) == 0) {
+    return {};
+  }
+  return phi::vectorize<int64_t>(dims);
 }
 
 void Tensor::reshape(const std::vector<int64_t> &shape) {
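This encodes a convention: a default-constructed DenseTensor carries dims {0}, and `Tensor::shape()` now maps that sentinel to an empty vector, which is what lets a fresh, unfilled tensor read back as shape []. A hedged sketch of the visible effect:

import paddle

t = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
assert t.shape == [2, 2]  # ordinary path: phi::vectorize over the real dims
# An impl still holding the default dims {0} now reports [] instead of [0].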
@@ -846,7 +846,11 @@ def monkey_patch_varbase():
         return res
 
     @framework.dygraph_only
-    def cuda(self, device_id, blocking):
+    def cuda(self, device_id=0, blocking=True):
+        if device_id is None:
+            device_id = 0
+        if not isinstance(device_id, int):
+            raise ValueError("\'device_id\' must be a positive integer")
         if self.place.is_gpu_place():
             return self
         else:
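The new defaults make `cuda()` callable with no arguments, as the old VarBase method allowed. A hedged sketch (needs a CUDA build; `None` is normalized to device 0):

import paddle

if paddle.is_compiled_with_cuda():
    t = paddle.to_tensor([1.0, 2.0])
    g = t.cuda()                           # device_id defaults to 0, blocking to True
    assert g.cuda().place.is_gpu_place()   # already on GPU: returns self
    try:
        t.cuda(device_id='0')              # non-int ids raise ValueError
    except ValueError:
        pass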