Unverified commit 887a3511, authored by wanghuancoder, committed by GitHub

fix eng doc for some api (#28477)

* fix eng doc, test=develop

* add import deprecated for layers, test=develop

* add block line for doc generate, test=develop

* remove todo for create_variable, test=develop

* add blank line for doc generate, test=develop

* add blank line for doc generate, test=develop
Parent 991345b3
@@ -32,6 +32,7 @@ from ..param_attr import ParamAttr
from paddle.fluid.executor import Executor, global_scope
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.framework import _current_expected_place as _get_device
import paddle.utils.deprecated as deprecated
__all__ = ['Layer']
@@ -388,20 +389,74 @@ class Layer(core.Layer):
return self._helper.create_parameter(temp_attr, shape, dtype, is_bias,
default_initializer)
# TODO: Add more parameter list when we need them
@deprecated(
since="2.0.0",
update_to="paddle.nn.Layer.create_tensor",
reason="New api in create_tensor, easier to use.")
def create_variable(self, name=None, persistable=None, dtype=None):
"""Create Variable for this layer.
"""
Create Tensor for this layer.
Parameters:
name(str, optional): name of the variable. Please refer to :ref:`api_guide_Name` . Default: None
persistable(bool, optional): if set this variable persistable. Default: False
name(str, optional): name of the tensor. Please refer to :ref:`api_guide_Name` . Default: None
persistable(bool, optional): if set this tensor persistable. Default: False
dtype(str, optional): data type of this parameter. If set str, it can be "bool", "float16", "float32", "float64","int8", "int16", "int32", "int64", "uint8" or "uint16". If set None, it will be "float32". Default: None
Returns:
Tensor, created Tensor.
Examples:
.. code-block:: python
import paddle

class MyLinear(paddle.nn.Layer):
    def __init__(self,
                 in_features,
                 out_features):
        super(MyLinear, self).__init__()
        self.linear = paddle.nn.Linear( 10, 10)
        self.back_var = self.create_variable(name = "linear_tmp_0", dtype=self._dtype)

    def forward(self, input):
        out = self.linear(input)
        paddle.assign( out, self.back_var)
        return out
"""
if name is not None:
var_name = ".".join([self._full_name, name])
else:
var_name = unique_name.generate(".".join(
[self._full_name, "_generated_var"]))
return self._helper.main_program.current_block().create_var(
name=var_name,
persistable=persistable,
dtype=dtype,
type=core.VarDesc.VarType.LOD_TENSOR)
# TODO: Add more parameter list when we need them
def create_tensor(self, name=None, persistable=None, dtype=None):
"""
Create Tensor for this layer.
Parameters:
name(str, optional): name of the tensor. Please refer to :ref:`api_guide_Name` . Default: None
persistable(bool, optional): if set this tensor persistable. Default: False
dtype(str, optional): data type of this parameter.
If set str, it can be "bool", "float16", "float32", "float64",
"int8", "int16", "int32", "int64", "uint8" or "uint16".
If set None, it will be "float32". Default: None
Returns:
Tensor, created Variable.
Tensor, created Tensor.
Examples:
.. code-block:: python
@@ -415,7 +470,7 @@ class Layer(core.Layer):
        super(MyLinear, self).__init__()
        self.linear = paddle.nn.Linear( 10, 10)
        self.back_var = self.create_variable(name = "linear_tmp_0", dtype=self._dtype)
        self.back_var = self.create_tensor(name = "linear_tmp_0", dtype=self._dtype)

    def forward(self, input):
        out = self.linear(input)
@@ -1053,7 +1108,7 @@ class Layer(core.Layer):
def __dir__(self):
"""
Return a list. Get all parameters, buffers(non-parameter variables), sublayers, method and attr of Layer.
Return a list. Get all parameters, buffers(non-parameter tensors), sublayers, method and attr of Layer.
Examples:
.. code-block:: python
......
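For readers skimming this file's diff, here is a minimal sketch of how the change plays out for user code: `create_tensor` is the documented replacement, while `create_variable` keeps working but is routed through the `@deprecated` decorator imported above. The `MyLinear` class mirrors the docstring example; the names `old_var`, `linear_tmp_1`, `layer` and the input shape are illustrative and not part of the diff, and the exact warning text comes from `paddle.utils.deprecated`.

.. code-block:: python

    import paddle

    class MyLinear(paddle.nn.Layer):
        def __init__(self):
            super(MyLinear, self).__init__()
            self.linear = paddle.nn.Linear(10, 10)
            # preferred spelling after this commit
            self.back_var = self.create_tensor(name="linear_tmp_0", dtype=self._dtype)
            # the old spelling still works, but emits a deprecation warning
            # pointing callers to paddle.nn.Layer.create_tensor:
            # self.old_var = self.create_variable(name="linear_tmp_1", dtype=self._dtype)

        def forward(self, x):
            out = self.linear(x)
            paddle.assign(out, self.back_var)
            return out

    layer = MyLinear()
    y = layer(paddle.rand([4, 10]))  # y has shape [4, 10]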
@@ -2387,21 +2387,21 @@ class BilinearTensorProduct(layers.Layer):
**bias** (Parameter): the learnable bias of this layer.
Returns:
Variable: A 2-D Tensor of shape [batch_size, size].
Tensor: A 2-D Tensor of shape [batch_size, size].
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
import numpy
with fluid.dygraph.guard():
layer1 = numpy.random.random((5, 5)).astype('float32')
layer2 = numpy.random.random((5, 4)).astype('float32')
bilinearTensorProduct = fluid.dygraph.nn.BilinearTensorProduct(
bilinearTensorProduct = paddle.nn.BilinearTensorProduct(
input1_dim=5, input2_dim=4, output_dim=1000)
ret = bilinearTensorProduct(fluid.dygraph.base.to_variable(layer1),
fluid.dygraph.base.to_variable(layer2))
ret = bilinearTensorProduct(paddle.to_tensor(layer1),
paddle.to_tensor(layer2))
"""
def __init__(self,
......
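Assembling the new-style lines from the hunk above into a standalone script gives a sketch like the following. It assumes Paddle 2.0's default dynamic-graph mode (so no `fluid.dygraph.guard()` is used); the exact layout inside the docstring may differ.

.. code-block:: python

    import paddle
    import numpy

    layer1 = numpy.random.random((5, 5)).astype('float32')
    layer2 = numpy.random.random((5, 4)).astype('float32')
    bilinearTensorProduct = paddle.nn.BilinearTensorProduct(
        input1_dim=5, input2_dim=4, output_dim=1000)
    # ret is a 2-D Tensor of shape [batch_size, output_dim] = [5, 1000]
    ret = bilinearTensorProduct(paddle.to_tensor(layer1),
                                paddle.to_tensor(layer2))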
@@ -725,7 +725,6 @@ def bilinear(x1, x2, weight, bias=None, name=None):
import numpy
import paddle.nn.functional as F
paddle.disable_static()
x1 = numpy.random.random((5, 5)).astype('float32')
x2 = numpy.random.random((5, 4)).astype('float32')
w = numpy.random.random((1000, 5, 4)).astype('float32')
......
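The functional example is truncated above; a self-contained sketch of calling `paddle.nn.functional.bilinear` along the same lines is shown below. The bias tensor `b`, the `result` name, and the output-shape comment are illustrative assumptions, not lines from the diff.

.. code-block:: python

    import paddle
    import paddle.nn.functional as F
    import numpy

    x1 = numpy.random.random((5, 5)).astype('float32')
    x2 = numpy.random.random((5, 4)).astype('float32')
    w = numpy.random.random((1000, 5, 4)).astype('float32')
    b = numpy.random.random((1, 1000)).astype('float32')

    # weight has shape [out_features, in1_features, in2_features];
    # the result has shape [batch_size, out_features] = [5, 1000]
    result = F.bilinear(paddle.to_tensor(x1),
                        paddle.to_tensor(x2),
                        paddle.to_tensor(w),
                        paddle.to_tensor(b))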
@@ -425,7 +425,6 @@ class Bilinear(layers.Layer):
import paddle
import numpy
paddle.disable_static()
layer1 = numpy.random.random((5, 5)).astype('float32')
layer2 = numpy.random.random((5, 4)).astype('float32')
bilinear = paddle.nn.Bilinear(
......
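Likewise, the `paddle.nn.Bilinear` layer example is cut off. A minimal sketch follows, assuming the 2.0 constructor arguments `in1_features`, `in2_features` and `out_features` (verify against the released API docs); the `bilinear` and `result` names are illustrative.

.. code-block:: python

    import paddle
    import numpy

    layer1 = numpy.random.random((5, 5)).astype('float32')
    layer2 = numpy.random.random((5, 4)).astype('float32')
    bilinear = paddle.nn.Bilinear(
        in1_features=5, in2_features=4, out_features=1000)
    result = bilinear(paddle.to_tensor(layer1),
                      paddle.to_tensor(layer2))  # shape [5, 1000]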