Unverified commit 2a5d1d54 authored by wanghuancoder, committed by GitHub

Eager tensor doc5 (#56250)

* add tensor doc
Parent daebb80a
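This commit attaches a PyDoc_STRVAR docstring to each eager Tensor method and property slot so the text becomes visible from Python. A minimal sketch of how the added docs surface at runtime (assuming a Paddle build that includes this change; output abbreviated):

    import paddle

    # The C strings registered via PyDoc_STRVAR become the __doc__ of the bound
    # methods and properties, so they show up in help() and interactive tooling.
    print(paddle.Tensor.is_contiguous.__doc__)
    print(paddle.Tensor.shape.__doc__)
    help(paddle.Tensor.data_ptr)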
@@ -545,6 +545,28 @@ static PyObject* tensor_method__copy_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_reconstruct_from___doc__,
R"DOC(reconstruct_from_($self, other/)
--
Reconstruct the self with other Tensor. It is a deep copy of 'self = other'.
Returns:
None.
Examples:
.. code-block:: python
import paddle
t1 = paddle.to_tensor([1.0], stop_gradient=False)
t2 = paddle.to_tensor([2.0], stop_gradient=True)
t1.reconstruct_from_(t2)
print(t1)
)DOC");
static PyObject* tensor_method_reconstruct_from_(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
@@ -655,6 +677,38 @@ static PyObject* tensor_method_clone(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_method_retain_grads__doc__, R"DOC(retain_grads($self, /)
--
Enables this Tensor to have its grad populated during backward(). It is a no-op for leaf Tensors.
Returns:
None.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1.0, 2.0, 3.0])
x.stop_gradient = False
y = x + x
y.retain_grads()
loss = y.sum()
loss.backward()
print(y.grad) # [1., 1., 1.]

x = paddle.to_tensor([1.0, 2.0, 3.0])
x.stop_gradient = False
y = x + x
# y.retain_grads()
loss = y.sum()
loss.backward()
print(y.grad) # None
)DOC");
static PyObject* tensor_retain_grads(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
@@ -966,6 +1020,16 @@ static PyObject* tensor_method_detach(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_method_detach___doc__, R"DOC(detach_($self, /)
--
Detach self from the current graph and return self.
In addition, the detached Tensor no longer participates in gradient propagation.
Returns:
Tensor, The detached Tensor.
)DOC");
static PyObject* tensor_method_detach_(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
@@ -985,6 +1049,24 @@ static PyObject* tensor_method_detach_(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_method_get_tensor__doc__, R"DOC(get_tensor($self, /)
--
Returns the underlying tensor of the original Tensor.
Returns:
The underlying tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1.0], stop_gradient=False)
underline_x = x.get_tensor()
print(underline_x) # prints the underlying DenseTensor's info
)DOC");
static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
@@ -1940,6 +2022,23 @@ static PyObject* tensor_method_get_non_zero_cols(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_method_is_dense__doc__, R"DOC(is_dense($self, /)
--
Whether the Tensor is a Dense Tensor.
Returns:
Bool, Whether the Tensor is a Dense Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1.0], stop_gradient=False)
print(x.is_dense())
)DOC");
static PyObject* tensor_method_is_dense(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
@@ -1951,6 +2050,23 @@ static PyObject* tensor_method_is_dense(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_method_is_dist__doc__, R"DOC(is_dist($self, /)
--
Whether the Tensor is a Distributed Tensor.
Returns:
Bool, Whether the Tensor is a Distributed Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1.0], stop_gradient=False)
print(x.is_dist()) # False
)DOC");
static PyObject* tensor_method_is_dist(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
@@ -2249,6 +2365,24 @@ static PyObject* tensor__unset_fake_empty(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_data_ptr__doc__,
R"DOC(data_ptr($self, /)
--
Returns the address of the first element of the current Tensor.
Returns:
int, The address of the first element of the current Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
print(x.data_ptr())
)DOC");
static PyObject* tensor_data_ptr(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
@@ -2278,6 +2412,25 @@ static PyObject* tensor__grad_ivar(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_get_strides__doc__,
R"DOC(get_strides($self, /)
--
Returns the strides of the current Tensor.
Returns:
List, the strides of the current Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = x[1]
print(y.get_strides())
)DOC");
static PyObject* tensor_method_strides(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
@@ -2296,6 +2449,27 @@ static PyObject* tensor_method_strides(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_contiguous__doc__,
R"DOC(contiguous($self, /)
--
Returns a contiguous-in-memory Tensor containing the same data as the current Tensor.
If the current Tensor is already contiguous, it is returned directly.
Returns:
Tensor, The contiguous Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = x[1]
y = y.contiguous()
print(y)
)DOC");
static PyObject* tensor_contiguous(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
@@ -2320,6 +2494,24 @@ static PyObject* tensor_contiguous(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_is_contiguous__doc__,
R"DOC(is_contiguous($self, /)
--
Whether the Tensor is contiguous.
Returns:
Bool, Whether the Tensor is contiguous.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = x[1]
print(y.is_contiguous())
)DOC");
static PyObject* tensor_is_contiguous(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
@@ -2401,11 +2593,11 @@ PyMethodDef variable_methods[] = { // NOLINT
{"reconstruct_from_", {"reconstruct_from_",
(PyCFunction)(void (*)())tensor_method_reconstruct_from_, (PyCFunction)(void (*)())tensor_method_reconstruct_from_,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
nullptr}, tensor_reconstruct_from___doc__},
{"retain_grads", {"retain_grads",
(PyCFunction)(void (*)())tensor_retain_grads, (PyCFunction)(void (*)())tensor_retain_grads,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
nullptr}, tensor_method_retain_grads__doc__},
{"clear_gradient", {"clear_gradient",
(PyCFunction)(void (*)())tensor_clear_gradient, (PyCFunction)(void (*)())tensor_clear_gradient,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
@@ -2413,11 +2605,11 @@ PyMethodDef variable_methods[] = { // NOLINT
{"is_dense",
(PyCFunction)(void (*)())tensor_method_is_dense,
METH_VARARGS | METH_KEYWORDS,
- nullptr},
+ tensor_method_is_dense__doc__},
{"is_dist",
(PyCFunction)(void (*)())tensor_method_is_dist,
METH_VARARGS | METH_KEYWORDS,
- nullptr},
+ tensor_method_is_dist__doc__},
{"_zero_grads",
(PyCFunction)(void (*)())tensor__zero_grads,
METH_VARARGS | METH_KEYWORDS,
@@ -2445,11 +2637,11 @@ PyMethodDef variable_methods[] = { // NOLINT
{"detach_",
(PyCFunction)(void (*)(void))tensor_method_detach_,
METH_VARARGS | METH_KEYWORDS,
- nullptr},
+ tensor_method_detach___doc__},
{"get_tensor",
(PyCFunction)(void (*)())tensor_method_get_underline_tensor,
METH_VARARGS | METH_KEYWORDS,
- nullptr},
+ tensor_method_get_tensor__doc__},
{"get_selected_rows",
(PyCFunction)(void (*)())tensor_method_get_underline_selected_rows,
METH_VARARGS | METH_KEYWORDS,
@@ -2608,7 +2800,7 @@ PyMethodDef variable_methods[] = { // NOLINT
{"data_ptr",
(PyCFunction)(void (*)())tensor_data_ptr,
METH_VARARGS | METH_KEYWORDS,
- nullptr},
+ tensor_data_ptr__doc__},
{"_grad_ivar",
(PyCFunction)(void (*)())tensor__grad_ivar,
METH_VARARGS | METH_KEYWORDS,
@@ -2616,15 +2808,15 @@ PyMethodDef variable_methods[] = { // NOLINT
{"contiguous",
(PyCFunction)(void (*)(void))tensor_contiguous,
METH_VARARGS | METH_KEYWORDS,
- nullptr},
+ tensor_contiguous__doc__},
{"is_contiguous",
(PyCFunction)(void (*)(void))tensor_is_contiguous,
METH_VARARGS | METH_KEYWORDS,
- nullptr},
+ tensor_is_contiguous__doc__},
{"get_strides",
(PyCFunction)(void (*)(void))tensor_method_strides,
METH_VARARGS | METH_KEYWORDS,
- nullptr},
+ tensor_get_strides__doc__},
#if defined(PADDLE_WITH_CUDA)
{"_tensor_uva",
(PyCFunction)(void (*)())tensor_method__uva,
...
@@ -40,6 +40,25 @@ namespace pybind {
extern PyTypeObject* p_tensor_type;
PyDoc_STRVAR(tensor_name__doc__,
R"DOC(name
Tensor's name.
Returns:
str: Tensor's name.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(1.)
print(x.name) # generated_tensor_0
x.name = 'test_tensor_name'
print(x.name) # test_tensor_name
)DOC");
PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
EAGER_TRY
// NOTE(dev): [why not use egr::Controller::Instance::GenerateUniqueName()?]
@@ -54,6 +73,23 @@ PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_type__doc__,
R"DOC(type
Tensor's type.
Returns:
VarType: Tensor's type.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(1.)
print(x.type) # VarType.LOD_TENSOR
)DOC");
PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_TRY
if (!self->tensor.defined()) {
@@ -120,6 +156,25 @@ int tensor_properties_set_name(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NEG
}
PyDoc_STRVAR(tensor_stop_gradient__doc__,
R"DOC(stop_gradient
Tensor's stop_gradient.
Returns:
bool: Tensor's stop_gradient.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(1.)
print(x.stop_gradient) # True
x.stop_gradient = False
print(x.stop_gradient) # False
)DOC");
PyObject* tensor_properties_get_stop_gradient(TensorObject* self,
void* closure) {
EAGER_TRY
@@ -128,8 +183,29 @@ PyObject* tensor_properties_get_stop_gradient(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_data__doc__,
R"DOC(data
The Tensor itself. Getting data returns self, and setting data updates this Tensor's value from the given Tensor.
Returns:
Tensor: self.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(1.)
print(x)
print(x.data)
x.data = paddle.to_tensor(2.)
print(x)
print(x.data)
)DOC");
PyObject* tensor_properties_get_data(TensorObject* self, void* closure) {
EAGER_TRY
Py_INCREF(self);
return reinterpret_cast<PyObject*>(self);
EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -149,6 +225,26 @@ int tensor_properties_set_data(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NEG
}
PyDoc_STRVAR(tensor_grad__doc__,
R"DOC(grad
Tensor's grad Tensor.
Returns:
Tensor: grad Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(1.0, stop_gradient=False)
y = x**2
y.backward()
print(x.grad)
x.grad = paddle.to_tensor(3.0)
print(x.grad)
)DOC");
PyObject* tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_TRY
VLOG(6) << "Get grad for tensor: " << self->tensor.name();
@@ -215,6 +311,25 @@ int tensor_properties_set_stop_gradient(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NEG
}
PyDoc_STRVAR(tensor_persistable__doc__,
R"DOC(persistable
Tensor's persistable.
Returns:
bool: persistable.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(1.0, stop_gradient=False)
print(x.persistable) # False
x.persistable = True
print(x.persistable) # True
)DOC");
PyObject* tensor_properties_get_persistable(TensorObject* self, void* closure) {
EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
@@ -273,6 +388,23 @@ PyObject* tensor_properties_get_dist_attr(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_shape__doc__,
R"DOC(shape
Tensor's shape.
Returns:
List: shape.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(1.0, stop_gradient=False)
print(x.shape)
)DOC");
PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_TRY
std::vector<int64_t> value;
@@ -342,6 +474,24 @@ PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_strides__doc__,
R"DOC(strides
Tensor's strides.
Returns:
List: strides.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = x[1]
print(y.strides)
)DOC");
PyObject* tensor_properties_get_strides(TensorObject* self, void* closure) {
EAGER_TRY
std::vector<int64_t> value;
@@ -361,6 +511,23 @@ PyObject* tensor_properties_get_strides(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_offset__doc__,
R"DOC(offset
The offset of the first element of the Tensor, relative to the beginning of its underlying memory block.
Returns:
int: offset.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = x[1]
print(y.offset)
)DOC");
PyObject* tensor_properties_get_offset(TensorObject* self, void* closure) {
EAGER_TRY
if (!self->tensor.defined() || !self->tensor.is_dense_tensor()) {
@@ -379,6 +546,22 @@ PyObject* tensor_properties_get_offset(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_layout__doc__,
R"DOC(layout
Tensor's memory layout.
Returns:
Layout: layout.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
print(x.layout)
)DOC");
PyObject* tensor_properties_get_layout(TensorObject* self, void* closure) {
EAGER_TRY
std::string layout = "";
@@ -397,6 +580,22 @@ PyObject* tensor_properties_get_layout(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_place__doc__,
R"DOC(place
The device where the Tensor's memory is located.
Returns:
Place: place.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
print(x.place)
)DOC");
PyObject* tensor_properties_get_place(TensorObject* self, void* closure) {
EAGER_TRY
return ToPyObject(self->tensor.place());
@@ -411,6 +610,22 @@ PyObject* tensor_properties_get_place_str(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyDoc_STRVAR(tensor_dtype__doc__,
R"DOC(dtype
Tensor's data type.
Returns:
paddle dtype: dtype.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
print(x.dtype)
)DOC");
PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
EAGER_TRY
if (!self->tensor.defined()) {
@@ -474,12 +689,12 @@ struct PyGetSetDef variable_properties[] = { // NOLINT
{"data", {"data",
(getter)tensor_properties_get_data, (getter)tensor_properties_get_data,
(setter)tensor_properties_set_data, (setter)tensor_properties_set_data,
nullptr, tensor_data__doc__,
nullptr}, nullptr},
{"grad", {"grad",
(getter)tensor_properties_get_grad, (getter)tensor_properties_get_grad,
(setter)tensor_properties_set_grad, (setter)tensor_properties_set_grad,
nullptr, tensor_grad__doc__,
nullptr}, nullptr},
{"grad_", {"grad_",
(getter)tensor_properties_get_grad, (getter)tensor_properties_get_grad,
@@ -489,27 +704,43 @@ struct PyGetSetDef variable_properties[] = { // NOLINT
{"name",
(getter)tensor_properties_get_name,
(setter)tensor_properties_set_name,
- nullptr,
+ tensor_name__doc__,
nullptr},
{"stop_gradient",
(getter)tensor_properties_get_stop_gradient,
(setter)tensor_properties_set_stop_gradient,
- nullptr,
+ tensor_stop_gradient__doc__,
nullptr},
{"persistable",
(getter)tensor_properties_get_persistable,
(setter)tensor_properties_set_persistable,
- nullptr,
+ tensor_persistable__doc__,
nullptr},
- {"shape", (getter)tensor_properties_get_shape, nullptr, nullptr, nullptr},
- {"layout", (getter)tensor_properties_get_layout, nullptr, nullptr, nullptr},
+ {"shape",
+ (getter)tensor_properties_get_shape,
+ nullptr,
+ tensor_shape__doc__,
+ nullptr},
+ {"layout",
+ (getter)tensor_properties_get_layout,
+ nullptr,
+ tensor_layout__doc__,
+ nullptr},
{"strides",
(getter)tensor_properties_get_strides,
nullptr,
- nullptr,
+ tensor_strides__doc__,
nullptr},
- {"place", (getter)tensor_properties_get_place, nullptr, nullptr, nullptr},
- {"offset", (getter)tensor_properties_get_offset, nullptr, nullptr, nullptr},
+ {"place",
+ (getter)tensor_properties_get_place,
+ nullptr,
+ tensor_place__doc__,
+ nullptr},
+ {"offset",
+ (getter)tensor_properties_get_offset,
+ nullptr,
+ tensor_offset__doc__,
+ nullptr},
{"dist_attr",
(getter)tensor_properties_get_dist_attr,
nullptr,
@@ -520,8 +751,16 @@ struct PyGetSetDef variable_properties[] = { // NOLINT
nullptr,
nullptr,
nullptr},
- {"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr},
- {"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr},
+ {"dtype",
+ (getter)tensor_properties_get_dtype,
+ nullptr,
+ tensor_dtype__doc__,
+ nullptr},
+ {"type",
+ (getter)tensor_properties_get_type,
+ nullptr,
+ tensor_type__doc__,
+ nullptr},
{"is_leaf",
(getter)tensor_properties_is_leaf,
nullptr,
...