Unverified commit 20e5ef62, authored by Yu Yang, committed via GitHub

Merge pull request #11483 from wanghaoshuang/origin/whs_doc

Fix doc of relu, log and zeros.
...@@ -25,72 +25,21 @@ import utils ...@@ -25,72 +25,21 @@ import utils
import random import random
__all__ = [ __all__ = [
'fc', 'fc', 'embedding', 'dynamic_lstm', 'dynamic_lstmp', 'dynamic_gru',
'embedding', 'gru_unit', 'linear_chain_crf', 'crf_decoding', 'cos_sim', 'cross_entropy',
'dynamic_lstm', 'square_error_cost', 'chunk_eval', 'sequence_conv', 'conv2d', 'conv3d',
'dynamic_lstmp', 'sequence_pool', 'sequence_softmax', 'softmax', 'pool2d', 'pool3d',
'dynamic_gru', 'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'conv3d_transpose',
'gru_unit', 'sequence_expand', 'lstm_unit', 'reduce_sum', 'reduce_mean', 'reduce_max',
'linear_chain_crf', 'reduce_min', 'reduce_prod', 'sequence_first_step', 'sequence_last_step',
'crf_decoding', 'dropout', 'split', 'ctc_greedy_decoder', 'edit_distance', 'l2_normalize',
'cos_sim', 'matmul', 'topk', 'warpctc', 'sequence_reshape', 'transpose', 'im2sequence',
'cross_entropy', 'nce', 'beam_search', 'row_conv', 'multiplex', 'layer_norm',
'square_error_cost', 'softmax_with_cross_entropy', 'smooth_l1', 'one_hot',
'chunk_eval', 'autoincreased_step_counter', 'reshape', 'lod_reset', 'lrn', 'pad',
'sequence_conv', 'label_smooth', 'roi_pool', 'dice_loss', 'image_resize',
'conv2d', 'image_resize_short', 'resize_bilinear', 'gather', 'random_crop',
'conv3d', 'mean_iou', 'relu', 'log'
'sequence_pool',
'sequence_softmax',
'softmax',
'pool2d',
'pool3d',
'batch_norm',
'beam_search_decode',
'conv2d_transpose',
'conv3d_transpose',
'sequence_expand',
'lstm_unit',
'reduce_sum',
'reduce_mean',
'reduce_max',
'reduce_min',
'reduce_prod',
'sequence_first_step',
'sequence_last_step',
'dropout',
'split',
'ctc_greedy_decoder',
'edit_distance',
'l2_normalize',
'matmul',
'topk',
'warpctc',
'sequence_reshape',
'transpose',
'im2sequence',
'nce',
'beam_search',
'row_conv',
'multiplex',
'layer_norm',
'softmax_with_cross_entropy',
'smooth_l1',
'one_hot',
'autoincreased_step_counter',
'reshape',
'lod_reset',
'lrn',
'pad',
'label_smooth',
'roi_pool',
'dice_loss',
'image_resize',
'image_resize_short',
'resize_bilinear',
'gather',
'random_crop',
'mean_iou',
] ]
...@@ -4784,6 +4733,62 @@ def random_crop(x, shape, seed=None): ...@@ -4784,6 +4733,62 @@ def random_crop(x, shape, seed=None):
return out return out
def log(x):
    """
    Calculates the natural log of the given input tensor, element-wise.

    .. math::

        Out = \\ln(x)

    Args:
        x (Variable): Input tensor.

    Returns:
        Variable: The natural log of the input tensor computed element-wise.

    Examples:

        .. code-block:: python

            output = fluid.layers.log(x)
    """
    helper = LayerHelper('log', **locals())
    # The parameter is named `x`, so the dtype lookup must be keyed on 'x';
    # the bare input_dtype() default looks for a parameter named 'input',
    # which this function does not have.
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_tmp_variable(dtype)
    # Bug fix: the op input is the parameter `x`; the original passed the
    # `input` builtin, which is not the tensor argument.
    helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
    return out
def relu(x):
    """
    Relu takes one input data (Tensor) and produces one output data (Tensor)
    where the rectified linear function, y = max(0, x), is applied to
    the tensor elementwise.

    .. math::

        Out = \\max(0, x)

    Args:
        x (Variable): The input tensor.

    Returns:
        Variable: The output tensor with the same shape as input.

    Examples:

        .. code-block:: python

            output = fluid.layers.relu(x)
    """
    helper = LayerHelper('relu', **locals())
    # The parameter is named `x`, so the dtype lookup must be keyed on 'x';
    # the bare input_dtype() default looks for a parameter named 'input',
    # which this function does not have.
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_tmp_variable(dtype)
    # Bug fix: the op input is the parameter `x`; the original passed the
    # `input` builtin, which is not the tensor argument.
    helper.append_op(type="relu", inputs={"X": x}, outputs={"Out": out})
    return out
def mean_iou(input, label, num_classes): def mean_iou(input, label, num_classes):
""" """
Mean Intersection-Over-Union is a common evaluation metric for Mean Intersection-Over-Union is a common evaluation metric for
...@@ -4810,11 +4815,10 @@ def mean_iou(input, label, num_classes): ...@@ -4810,11 +4815,10 @@ def mean_iou(input, label, num_classes):
out_wrong(Variable): A Tensor with shape [num_classes]. The wrong numbers of each class. out_wrong(Variable): A Tensor with shape [num_classes]. The wrong numbers of each class.
out_correct(Variable): A Tensor with shape [num_classes]. The correct numbers of each class. out_correct(Variable): A Tensor with shape [num_classes]. The correct numbers of each class.
Examples: Examples:
.. code-block:: python .. code-block:: python
iou, wrongs, corrects = fluid.layers.mean_iou(predict, label, num_classes) iou, wrongs, corrects = fluid.layers.mean_iou(predict, label, num_classes)
""" """
helper = LayerHelper('mean_iou', **locals()) helper = LayerHelper('mean_iou', **locals())
......
...@@ -17,7 +17,6 @@ __activations__ = [ ...@@ -17,7 +17,6 @@ __activations__ = [
'sigmoid', 'sigmoid',
'logsigmoid', 'logsigmoid',
'exp', 'exp',
'relu',
'tanh', 'tanh',
'tanh_shrink', 'tanh_shrink',
'softshrink', 'softshrink',
...@@ -29,7 +28,6 @@ __activations__ = [ ...@@ -29,7 +28,6 @@ __activations__ = [
'sin', 'sin',
'round', 'round',
'reciprocal', 'reciprocal',
'log',
'square', 'square',
'softplus', 'softplus',
'softsign', 'softsign',
......
...@@ -453,11 +453,12 @@ def zeros(shape, dtype, force_cpu=False): ...@@ -453,11 +453,12 @@ def zeros(shape, dtype, force_cpu=False):
It also sets *stop_gradient* to True. It also sets *stop_gradient* to True.
Args: Args:
shape(tuple|list|None): Shape of output tensor shape(tuple|list|None): Shape of output tensor.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor.
force_cpu(bool, default False): Whether to make output stay on CPU.
Returns: Returns:
Variable: The tensor variable storing the output Variable: The tensor variable storing the output.
Examples: Examples:
.. code-block:: python .. code-block:: python
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Please finish editing this message first!
To comment, please register