Unverified commit da71173b authored by: N Noel committed by: GitHub

Fix ops doc for some ops

Parent 770395cb
...@@ -203,7 +203,7 @@ $$out = x - \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ ...@@ -203,7 +203,7 @@ $$out = x - \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$
UNUSED constexpr char SqrtDoc[] = R"DOC( UNUSED constexpr char SqrtDoc[] = R"DOC(
Sqrt Activation Operator. Sqrt Activation Operator.
.. math:: out=\\sqrt{x}=x^{1/2} $$out=\\sqrt{x}=x^{1/2}$$
**Note**: **Note**:
input value must be greater than or equal to zero. input value must be greater than or equal to zero.
...@@ -229,14 +229,14 @@ $$out = |x|$$ ...@@ -229,14 +229,14 @@ $$out = |x|$$
UNUSED constexpr char CeilDoc[] = R"DOC( UNUSED constexpr char CeilDoc[] = R"DOC(
Ceil Operator. Computes ceil of x element-wise. Ceil Operator. Computes ceil of x element-wise.
$$out = \\left \\lceil x \\right \\rceil$$ $$out = \\lceil x \\rceil$$
)DOC"; )DOC";
UNUSED constexpr char FloorDoc[] = R"DOC( UNUSED constexpr char FloorDoc[] = R"DOC(
Floor Activation Operator. Computes floor of x element-wise. Floor Activation Operator. Computes floor of x element-wise.
$$out = \\left \\lfloor x \\right \\rfloor$$ $$out = \\lfloor x \\rfloor$$
)DOC"; )DOC";
...@@ -273,7 +273,7 @@ $$out = cosh(x)$$ ...@@ -273,7 +273,7 @@ $$out = cosh(x)$$
UNUSED constexpr char RoundDoc[] = R"DOC( UNUSED constexpr char RoundDoc[] = R"DOC(
The OP rounds the values in the input to the nearest integer value. The OP rounds the values in the input to the nearest integer value.
.. code-block:: python .. code-block:: text
input: input:
x.shape = [4] x.shape = [4]
...@@ -592,7 +592,7 @@ class STanhOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -592,7 +592,7 @@ class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
void Make() override { void Make() override {
AddInput("X", AddInput("X",
"Input of STanh operator." "Input of STanh operator."
" A LoDTensor or Tensor with type float32, float64."); " A Tensor with type float32, float64.");
AddOutput("Out", "Output of STanh operator. A Tensor with type float32."); AddOutput("Out", "Output of STanh operator. A Tensor with type float32.");
AddAttr<float>("scale_a", "The scale parameter of a for the input. ") AddAttr<float>("scale_a", "The scale parameter of a for the input. ")
.SetDefault(0.67f); .SetDefault(0.67f);
......
...@@ -82,7 +82,7 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -82,7 +82,7 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker {
public: public:
void Make() override { void Make() override {
AddInput("X", AddInput("X",
"The 1st input of cos_sim op, LoDTensor with shape ``[N_1, N_2, " "The 1st input of cos_sim op, Tensor with shape ``[N_1, N_2, "
"..., N_k]``, the data type is float32."); "..., N_k]``, the data type is float32.");
AddInput("Y", AddInput("Y",
"The 2nd input of cos_sim op, Tensor with shape ``[N_1 or 1, N_2, " "The 2nd input of cos_sim op, Tensor with shape ``[N_1 or 1, N_2, "
...@@ -110,9 +110,6 @@ of input Y could be just 1 (different from input X), which will be ...@@ -110,9 +110,6 @@ of input Y could be just 1 (different from input X), which will be
broadcasted to match the shape of input X before computing their cosine broadcasted to match the shape of input X before computing their cosine
similarity. similarity.
Both the input X and Y can carry the LoD (Level of Details) information,
or not. But the output only shares the LoD information with input X.
)DOC"); )DOC");
} }
}; };
......
...@@ -40,15 +40,11 @@ class ElementwiseMaxOpMaker : public ElementwiseOpMaker { ...@@ -40,15 +40,11 @@ class ElementwiseMaxOpMaker : public ElementwiseOpMaker {
std::string GetEquation() const override { return "Out = max(X, Y)"; } std::string GetEquation() const override { return "Out = max(X, Y)"; }
void AddInputX() override { void AddInputX() override {
AddInput( AddInput("X", "The first tensor holding the elements to be compared.");
"X",
"(Variable), The first tensor holding the elements to be compared.");
} }
void AddInputY() override { void AddInputY() override {
AddInput( AddInput("Y", "The second tensor holding the elements to be compared.");
"Y",
"(Variable), The second tensor holding the elements to be compared.");
} }
std::string GetOpFuntionality() const override { std::string GetOpFuntionality() const override {
......
...@@ -40,15 +40,11 @@ class ElementwiseMinOpMaker : public ElementwiseOpMaker { ...@@ -40,15 +40,11 @@ class ElementwiseMinOpMaker : public ElementwiseOpMaker {
std::string GetEquation() const override { return "Out = min(X, Y)"; } std::string GetEquation() const override { return "Out = min(X, Y)"; }
void AddInputX() override { void AddInputX() override {
AddInput( AddInput("X", "The first tensor holding the elements to be compared.");
"X",
"(Variable), The first tensor holding the elements to be compared.");
} }
void AddInputY() override { void AddInputY() override {
AddInput( AddInput("Y", "The second tensor holding the elements to be compared.");
"Y",
"(Variable), The second tensor holding the elements to be compared.");
} }
std::string GetOpFuntionality() const override { std::string GetOpFuntionality() const override {
......
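The two op makers above document the element-wise comparison ops Out = max(X, Y) and Out = min(X, Y). A minimal usage sketch, assuming a PaddlePaddle 2.0 install where these ops are exposed as paddle.maximum and paddle.minimum:

.. code-block:: python

    import paddle

    # Element-wise max/min of two tensors with the same shape.
    x = paddle.to_tensor([[1.0, 5.0], [3.0, 2.0]])
    y = paddle.to_tensor([[4.0, 2.0], [3.0, 7.0]])
    print(paddle.maximum(x, y))  # [[4., 5.], [3., 7.]]
    print(paddle.minimum(x, y))  # [[1., 2.], [3., 2.]]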
...@@ -1583,19 +1583,16 @@ def create_array(dtype): ...@@ -1583,19 +1583,16 @@ def create_array(dtype):
@templatedoc() @templatedoc()
def less_than(x, y, force_cpu=None, cond=None, name=None): def less_than(x, y, force_cpu=None, cond=None, name=None):
""" """
:alias_main: paddle.less_than
:alias: paddle.less_than,paddle.tensor.less_than,paddle.tensor.logic.less_than
:old_api: paddle.fluid.layers.less_than
${comment} ${comment}
Args: Args:
x(${x_type}): ${x_comment}. x(Tensor): ${x_comment}.
y(${y_type}): ${y_comment}. y(Tensor): ${y_comment}.
force_cpu(${force_cpu_type}): ${force_cpu_comment}. force_cpu(${force_cpu_type}): ${force_cpu_comment}.
cond(Variable, optional): Optional output which can be any created Variable cond(Tensor, optional): Optional output which can be any created Tensor
that meets the requirements to store the result of *less_than*. that meets the requirements to store the result of *less_than*.
if cond is None, a new Varibale will be created to store the result. if cond is None, a new Tensor will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`. user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns: Returns:
...@@ -1604,25 +1601,13 @@ def less_than(x, y, force_cpu=None, cond=None, name=None): ...@@ -1604,25 +1601,13 @@ def less_than(x, y, force_cpu=None, cond=None, name=None):
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid import paddle
import numpy as np
# Graph Organizing
x = fluid.layers.data(name='x', shape=[2], dtype='float64')
y = fluid.layers.data(name='y', shape=[2], dtype='float64')
result = fluid.layers.less_than(x=x, y=y)
# The comment lists another available method.
# result = fluid.layers.fill_constant(shape=[2], dtype='float64', value=0)
# fluid.layers.less_than(x=x, y=y, cond=result)
# Create an executor using CPU as example x = paddle.to_tensor([1, 2, 3, 4], dtype='float32')
exe = fluid.Executor(fluid.CPUPlace()) y = paddle.to_tensor([2, 2, 1, 3], dtype='float32')
result = paddle.less_than(x, y)
print(result) # [True, False, False, False]
# Execute
x_i = np.array([[1, 2], [3, 4]]).astype(np.float64)
y_i = np.array([[2, 2], [1, 3]]).astype(np.float64)
result_value, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[result])
print(result_value) # [[True, False], [False, False]]
""" """
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"less_than") "less_than")
......
...@@ -55,10 +55,11 @@ _two_bang_pattern_ = re.compile(r"!!([^!]+)!!") ...@@ -55,10 +55,11 @@ _two_bang_pattern_ = re.compile(r"!!([^!]+)!!")
def escape_math(text): def escape_math(text):
return _two_bang_pattern_.sub( #return _two_bang_pattern_.sub(
r'$$\1$$', # r'$$\1$$',
_single_dollar_pattern_.sub(r':math:`\1`', # _single_dollar_pattern_.sub(r':math:\n`\1`',
_two_dollar_pattern_.sub(r"!!\1!!", text))) # _two_dollar_pattern_.sub(r"!!\1!!", text)))
return _two_dollar_pattern_.sub(r':math:`\1`', text)
def _generate_doc_string_(op_proto, def _generate_doc_string_(op_proto,
......
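The escape_math change above stops rewriting single-dollar $...$ spans and now only turns $$...$$ spans into Sphinx :math: roles. A self-contained sketch of that behaviour; the regex below is an assumed stand-in for _two_dollar_pattern_, not copied from the file:

.. code-block:: python

    import re

    # Assumed stand-in for _two_dollar_pattern_: capture the text between $$ ... $$.
    two_dollar_pattern = re.compile(r"\$\$([^\$]+)\$\$")

    def escape_math(text):
        # Only double-dollar math is rewritten; single-dollar text is left alone.
        return two_dollar_pattern.sub(r':math:`\1`', text)

    print(escape_math("Sqrt Activation Operator. $$out=\\sqrt{x}$$"))
    # Sqrt Activation Operator. :math:`out=\sqrt{x}`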
...@@ -377,9 +377,7 @@ def edit_distance(input, ...@@ -377,9 +377,7 @@ def edit_distance(input,
So the edit distance between A and B is 3. So the edit distance between A and B is 3.
The input is a LoDTensor or Tensor. The input is a Tensor, and the input_length and label_length should be provided.
If it is a LoDTensor, The separation is specified by the LoD information.
If it is a Tensor, The input_length and label_length should be supported.
The `batch_size` of labels should be same as `input`. The `batch_size` of labels should be same as `input`.
...@@ -388,59 +386,36 @@ def edit_distance(input, ...@@ -388,59 +386,36 @@ def edit_distance(input,
the edit distance value will be divided by the length of label. the edit distance value will be divided by the length of label.
Parameters: Parameters:
input(Variable): The input variable which is a tensor or LoDTensor, its rank should be equal to 2 and its data type should be int64. input(Tensor): The input tensor, its rank should be equal to 2 and its data type should be int64.
label(Variable): The label variable which is a tensor or LoDTensor, its rank should be equal to 2 and its data type should be int64. label(Tensor): The label tensor, its rank should be equal to 2 and its data type should be int64.
normalized(bool, default True): Indicates whether to normalize the edit distance. normalized(bool, default True): Indicates whether to normalize the edit distance.
ignored_tokens(list<int>, default None): Tokens that will be removed before ignored_tokens(list<int>, default None): Tokens that will be removed before
calculating edit distance. calculating edit distance.
input_length(Variable): The length for each sequence in `input` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64. input_length(Tensor): The length for each sequence in `input` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
label_length(Variable): The length for each sequence in `label` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64. label_length(Tensor): The length for each sequence in `label` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
NOTE: To avoid unexpected results, the value of every element in input_length and label_length should be equal to the value of the second dimension of input and label. For example, the input: [[1,2,3,4],[5,6,7,8],[9,10,11,12]], the shape of input is [3,4] and the input_length should be [4,4,4] NOTE: To avoid unexpected results, the value of every element in input_length and label_length should be equal to the value of the second dimension of input and label. For example, the input: [[1,2,3,4],[5,6,7,8],[9,10,11,12]], the shape of input is [3,4] and the input_length should be [4,4,4]
NOTE: This API is different from fluid.metrics.EditDistance NOTE: This API is different from fluid.metrics.EditDistance
Returns: Returns:
Tuple: Tuple:
distance(Variable): edit distance result, its data type is float32, and its shape is (batch_size, 1). distance(Tensor): edit distance result, its data type is float32, and its shape is (batch_size, 1).
sequence_num(Variable): sequence number, its data type is float32, and its shape is (1,). sequence_num(Tensor): sequence number, its data type is float32, and its shape is (1,).
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid import paddle
import numpy as np import paddle.nn.functional as F
# using LoDTensor
x_lod = fluid.data(name='x_lod', shape=[None,1], dtype='int64', lod_level=1)
y_lod = fluid.data(name='y_lod', shape=[None,1], dtype='int64', lod_level=1)
distance_lod, seq_num_lod = fluid.layers.edit_distance(input=x_lod, label=y_lod)
# using Tensor
input_data = np.array([[1,2,3],[4,5,6],[4,4,4],[1,1,1]]).astype('int64')
label_data = np.array([[1,3,4,1],[4,5,8,1],[7,7,7,1],[1,1,1,1]]).astype('int64')
input_len = np.array([3,3,3,3]).astype('int64')
label_len = np.array([4,4,4,4]).astype('int64')
input_t = fluid.data(name='input', shape=[None,3], dtype='int64')
label_t = fluid.data(name='label', shape=[None,4], dtype='int64')
input_len_t = fluid.data(name='input_length', shape=[None], dtype='int64')
label_len_t = fluid.data(name='label_length', shape=[None], dtype='int64')
distance, sequence_num = fluid.layers.edit_distance(input=input_t, label=label_t, input_length=input_len_t, label_length=label_len_t,normalized=False) input = paddle.to_tensor([[1,2,3],[4,5,6],[4,4,4],[1,1,1]], dtype='int64')
label = paddle.to_tensor([[1,3,4,1],[4,5,8,1],[7,7,7,1],[1,1,1,1]], dtype='int64')
input_len = paddle.to_tensor([3,3,3,3], dtype='int64')
label_len = paddle.to_tensor([4,4,4,4], dtype='int64')
# print(input_data.shape, label_data.shape) distance, sequence_num = F.loss.edit_distance(input=input, label=label, input_length=input_len, label_length=label_len, normalized=False)
# ((4,3), (4,4))
place = fluid.CPUPlace() # print(distance)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
dis, seq_num = exe.run(fluid.default_main_program(),
feed={"input":input_data,
"label":label_data,
"input_length": input_len,
"label_length": label_len},
fetch_list=[distance,sequence_num])
# print(dis)
# [[3.] # [[3.]
# [2.] # [2.]
# [4.] # [4.]
...@@ -451,7 +426,7 @@ def edit_distance(input, ...@@ -451,7 +426,7 @@ def edit_distance(input,
# [1. ] # [1. ]
# [0.25] # [0.25]
# #
# print(seq_num) # print(sequence_num)
# [4] # [4]
""" """
...@@ -1434,18 +1409,15 @@ def sigmoid_cross_entropy_with_logits(x, ...@@ -1434,18 +1409,15 @@ def sigmoid_cross_entropy_with_logits(x,
name=None, name=None,
normalize=False): normalize=False):
""" """
:alias_main: paddle.nn.functional.sigmoid_cross_entropy_with_logits
:alias: paddle.nn.functional.sigmoid_cross_entropy_with_logits,paddle.nn.functional.loss.sigmoid_cross_entropy_with_logits
:old_api: paddle.fluid.layers.sigmoid_cross_entropy_with_logits
${comment} ${comment}
Args: Args:
x(Variable): a 2-D tensor with shape N x D, where N is the batch size and x(Tensor): a 2-D tensor with shape N x D, where N is the batch size and
D is the number of classes. This input is a tensor of logits computed D is the number of classes. This input is a tensor of logits computed
by the previous operator. Logits are unscaled log probabilities given by the previous operator. Logits are unscaled log probabilities given
as log(p/(1-p)) The data type should be float32 or float64. as log(p/(1-p)) The data type should be float32 or float64.
label (Variable): a 2-D tensor of the same type and shape as X. label (Tensor): a 2-D tensor of the same type and shape as X.
This input is a tensor of probabilistic labels for each logit. This input is a tensor of probabilistic labels for each logit.
ignore_index(int): Specifies a target value that is ignored and ignore_index(int): Specifies a target value that is ignored and
does not contribute to the input gradient. does not contribute to the input gradient.
...@@ -1456,22 +1428,19 @@ def sigmoid_cross_entropy_with_logits(x, ...@@ -1456,22 +1428,19 @@ def sigmoid_cross_entropy_with_logits(x,
targets != ignore_index. targets != ignore_index.
Returns: Returns:
out(${out_type}): ${out_comment} out(Tensor): ${out_comment}
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid
input = fluid.data( import paddle
name='data', shape=[10], dtype='float32')
label = fluid.data( input = paddle.rand(shape=[10], dtype='float32')
name='data', shape=[10], dtype='float32') label = paddle.rand(shape=[10], dtype='float32')
loss = fluid.layers.sigmoid_cross_entropy_with_logits( loss = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(input, label,
x=input, ignore_index=-1, normalize=True)
label=label, print(loss)
ignore_index=-1,
normalize=True) # or False
# loss = fluid.layers.reduce_sum(loss) # summation of loss
""" """
check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'], check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
'sigmoid_cross_entropy_with_logits') 'sigmoid_cross_entropy_with_logits')
...@@ -1619,47 +1588,44 @@ def huber_loss(input, label, delta): ...@@ -1619,47 +1588,44 @@ def huber_loss(input, label, delta):
@templatedoc() @templatedoc()
def kldiv_loss(x, target, reduction='mean', name=None): def kldiv_loss(x, target, reduction='mean', name=None):
""" """
:alias_main: paddle.nn.functional.kldiv_loss
:alias: paddle.nn.functional.kldiv_loss,paddle.nn.functional.loss.kldiv_loss
:old_api: paddle.fluid.layers.kldiv_loss
${comment} ${comment}
Args: Args:
x (Variable): ${x_comment} x (Tensor): ${x_comment}
target (Variable): ${target_comment} target (Tensor): ${target_comment}
reduction (Variable): ${reduction_comment} reduction (str): ${reduction_comment}
name(str, optional): For detailed information, please refer name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set it and to :ref:`api_guide_Name`. Usually there is no need to set it and
it is None by default. it is None by default.
Returns: Returns:
Variable(Tensor): The KL divergence loss. The data type is same as input tensor Tensor: The KL divergence loss. The data type is the same as the input tensor
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
# 'batchmean' reduction, loss shape will be [N] x = paddle.rand(shape=[3,4,2,2], dtype='float32')
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2] target = paddle.rand(shape=[3,4,2,2], dtype='float32')
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean') # shape=[-1] # 'batchmean' reduction, loss shape will be [1]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean')
print(loss.shape) # shape=[1]
# 'mean' reduction, loss shape will be [1] # 'mean' reduction, loss shape will be [1]
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2] loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean')
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32') print(loss.shape) # shape=[1]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean') # shape=[1]
# 'sum' reduction, loss shape will be [1] # 'sum' reduction, loss shape will be [1]
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2] loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum')
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32') print(loss.shape) # shape=[1]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum') # shape=[1]
# 'none' reduction, loss shape is same with X shape # 'none' reduction, loss shape is same with X shape
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2] loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none')
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32') print(loss.shape) # shape=[3, 4, 2, 2]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none') # shape=[-1, 4, 2, 2]
""" """
helper = LayerHelper('kldiv_loss', **locals()) helper = LayerHelper('kldiv_loss', **locals())
......
...@@ -912,19 +912,22 @@ def cos_sim(X, Y): ...@@ -912,19 +912,22 @@ def cos_sim(X, Y):
${comment} ${comment}
Args: Args:
X (Variable): ${x_comment}. X (Tensor): ${x_comment}.
Y (Variable): ${y_comment}. Y (Tensor): ${y_comment}.
Returns: Returns:
A Variable holding LoDTensor representing the output of cosine(X, Y). A Tensor representing the output of cosine(X, Y).
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid import paddle
x = fluid.data(name='x', shape=[3, 7], dtype='float32')
y = fluid.data(name='y', shape=[1, 7], dtype='float32') x = paddle.rand(shape=[3, 7], dtype='float32')
out = fluid.layers.cos_sim(x, y) y = paddle.rand(shape=[1, 7], dtype='float32')
out = paddle.fluid.layers.cos_sim(x, y)
print(out)
""" """
check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim') check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim')
check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim') check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim')
...@@ -1116,12 +1119,11 @@ def chunk_eval(input, ...@@ -1116,12 +1119,11 @@ def chunk_eval(input,
type correctly. type correctly.
Args: Args:
input (Variable): A Tensor or LoDTensor, representing the predicted labels input (Tensor): A Tensor representing the predicted labels
from the network. When it is a Tensor, its shape would be `[N, M, 1]`, from the network. Its shape would be `[N, M, 1]`,
where `N` stands for batch size, `M` for sequence length; When it is where `N` stands for batch size, `M` for sequence length.
a LoDTensor, its shape would be `[N, 1]` where `N` stands for the total The data type should be int64.
sequence lengths in this mini-batch. The data type should be int64. label (Tensor): A Tensor representing the ground-truth labels.
label (Variable): A Tensor or LoDTensor representing the ground-truth labels.
It should have the same shape, lod and data type as ``input`` . It should have the same shape, lod and data type as ``input`` .
chunk_scheme (str): Indicate the tagging schemes used here. The value must chunk_scheme (str): Indicate the tagging schemes used here. The value must
be IOB, IOE, IOBES or plain. be IOB, IOE, IOBES or plain.
...@@ -1129,9 +1131,8 @@ def chunk_eval(input, ...@@ -1129,9 +1131,8 @@ def chunk_eval(input,
excluded_chunk_types (list, optional): Indicate the chunk types shouldn't excluded_chunk_types (list, optional): Indicate the chunk types shouldn't
be taken into account. It should be a list of chunk type ids(integer). be taken into account. It should be a list of chunk type ids(integer).
Default None. Default None.
seq_length(Variable, optional): A 1D Tensor containing the length of each seq_length(Tensor, optional): A 1D Tensor containing the length of each
sequence when ``input`` and ``label`` are Tensor. It needn't be sequence when ``input`` and ``label`` are Tensor. Default None.
provided if ``input`` and ``label`` are LoDTensor. Default None.
Returns: Returns:
tuple: A tuple including precision, recall, F1-score, chunk number detected, \ tuple: A tuple including precision, recall, F1-score, chunk number detected, \
...@@ -1230,7 +1231,7 @@ def softmax(input, use_cudnn=True, name=None, axis=-1): ...@@ -1230,7 +1231,7 @@ def softmax(input, use_cudnn=True, name=None, axis=-1):
.. math:: .. math::
Out[i, j] = \\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])} Out[i, j] = \\frac{\\exp(X[i, j])}{\\sum_j(\\exp(X[i, j]))}
Example: Example:
...@@ -1280,7 +1281,7 @@ def softmax(input, use_cudnn=True, name=None, axis=-1): ...@@ -1280,7 +1281,7 @@ def softmax(input, use_cudnn=True, name=None, axis=-1):
[0.72747516, 0.72747516, 0.72747516, 0.72747516]]] [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
Args: Args:
input (Variable): The input variable. A multi-dimension ``Tensor`` with type float32 or float64. input (Tensor): The input tensor. A multi-dimension ``Tensor`` with type float32 or float64.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn \ use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn \
library is installed. To improve numerical stability, set use_cudnn to \ library is installed. To improve numerical stability, set use_cudnn to \
False by default. False by default.
...@@ -1288,27 +1289,33 @@ def softmax(input, use_cudnn=True, name=None, axis=-1): ...@@ -1288,27 +1289,33 @@ def softmax(input, use_cudnn=True, name=None, axis=-1):
will be named automatically. Default: None. will be named automatically. Default: None.
axis (int, optional): The index of dimension to perform softmax calculations, it should axis (int, optional): The index of dimension to perform softmax calculations, it should
be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of
input variable. Default: -1. -1 means the last dimension. input tensor. Default: -1. -1 means the last dimension.
Returns: Returns:
Variable: ``Tensor`` indicates the output of softmax. The data type and shape are the same as ``input`` . Tensor: ``Tensor`` indicates the output of softmax. The data type and shape are the same as ``input`` .
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid import paddle
import numpy as np import paddle.nn.functional as F
x = paddle.to_tensor([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]], dtype='float32')
y = F.softmax(x, axis=1)
print(y)
# [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
# [0.01786798, 0.01786798, 0.04661262, 0.04661262],
# [0.97555870, 0.97555870, 0.93623954, 0.93623954]],
# [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
# [0.26762316, 0.26762316, 0.26762316, 0.26762316],
# [0.72747517, 0.72747517, 0.72747517, 0.72747517]]]
data = fluid.data(name="input", shape=[-1, 3],dtype="float32")
result = fluid.layers.softmax(data,axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3, 3).astype("float32")
output= exe.run(feed={"input": x},
fetch_list=[result[0]])
print(output)
""" """
if in_dygraph_mode(): if in_dygraph_mode():
...@@ -9539,9 +9546,6 @@ def pow(x, factor=1.0, name=None): ...@@ -9539,9 +9546,6 @@ def pow(x, factor=1.0, name=None):
@templatedoc() @templatedoc()
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None): def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
""" """
:alias_main: paddle.stanh
:alias: paddle.stanh,paddle.tensor.stanh,paddle.tensor.math.stanh
:old_api: paddle.fluid.layers.stanh
${comment} ${comment}
Args: Args:
...@@ -9552,27 +9556,24 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None): ...@@ -9552,27 +9556,24 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
will be named automatically. will be named automatically.
Returns: Returns:
output(${out_type}): ${out_comment}. output(Tensor): ${out_comment}.
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid import paddle
import numpy as np
data = fluid.data(name="input", shape=[-1, 3])
result = fluid.layers.stanh(data,scale_a=0.67, scale_b=1.72)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.random(size=(3, 3)).astype('float32')
output= exe.run(feed={"input": x},
fetch_list=[result])
print(output)
#[array([[0.626466 , 0.89842904, 0.7501062 ], data = paddle.rand(shape=[3, 3], dtype='float32')
# [0.25147712, 0.7484996 , 0.22902708], output = paddle.stanh(data, scale_a=0.67, scale_b=1.72)
# [0.62705994, 0.23110689, 0.56902856]], dtype=float32)] print(data)
# [[0.19412413, 0.66871136, 0.77059180],
# [0.89738929, 0.35827777, 0.60592669],
# [0.66346580, 0.78424633, 0.46533889]]
print(output)
# [[0.22245567, 0.72288811, 0.81671900],
# [0.92525512, 0.40512756, 0.66227961],
# [0.71790355, 0.82885355, 0.51953089]]
""" """
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh') check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')
...@@ -9857,20 +9858,12 @@ def leaky_relu(x, alpha=0.02, name=None): ...@@ -9857,20 +9858,12 @@ def leaky_relu(x, alpha=0.02, name=None):
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid import paddle
import numpy as np
# Graph Organizing
x = fluid.layers.data(name="x", shape=[2], dtype="float32")
res = fluid.layers.leaky_relu(x, alpha=0.1)
# Create an executor using CPU as an example x = paddle.to_tensor([[-1, 2], [3, -4]], dtype='float32')
exe = fluid.Executor(fluid.CPUPlace()) y = paddle.fluid.layers.leaky_relu(x, alpha=0.1)
print(y) # [[-0.1, 2], [3, -0.4]]
# Execute
x_i = np.array([[-1, 2], [3, -4]]).astype(np.float32)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[-0.1, 2], [3, -0.4]]
""" """
return paddle.nn.functional.leaky_relu(x, alpha, name) return paddle.nn.functional.leaky_relu(x, alpha, name)
...@@ -12172,11 +12165,10 @@ def logical_and(x, y, out=None, name=None): ...@@ -12172,11 +12165,10 @@ def logical_and(x, y, out=None, name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([True]) x = paddle.to_tensor([True])
y = paddle.to_tensor([True, False, True, False]) y = paddle.to_tensor([True, False, True, False])
res = paddle.logical_and(x, y) res = paddle.logical_and(x, y)
print(res.numpy()) # [True False True False] print(res) # [True False True False]
""" """
return _logical_op( return _logical_op(
op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True) op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)
...@@ -12210,13 +12202,12 @@ def logical_or(x, y, out=None, name=None): ...@@ -12210,13 +12202,12 @@ def logical_or(x, y, out=None, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.disable_static()
x_data = np.array([True, False], dtype=np.bool).reshape(2, 1) x_data = np.array([True, False], dtype=np.bool).reshape(2, 1)
y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2) y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2)
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data) y = paddle.to_tensor(y_data)
res = paddle.logical_or(x, y) res = paddle.logical_or(x, y)
print(res.numpy()) # [[ True True] [ True False]] print(res) # [[ True True] [ True False]]
""" """
return _logical_op( return _logical_op(
op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True) op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True)
...@@ -12250,13 +12241,12 @@ def logical_xor(x, y, out=None, name=None): ...@@ -12250,13 +12241,12 @@ def logical_xor(x, y, out=None, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.disable_static()
x_data = np.array([True, False], dtype=np.bool).reshape([2, 1]) x_data = np.array([True, False], dtype=np.bool).reshape([2, 1])
y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2]) y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2])
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data) y = paddle.to_tensor(y_data)
res = paddle.logical_xor(x, y) res = paddle.logical_xor(x, y)
print(res.numpy()) # [[False, True], [ True, False]] print(res) # [[False, True], [ True, False]]
""" """
return _logical_op( return _logical_op(
op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True) op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True)
...@@ -12265,9 +12255,6 @@ def logical_xor(x, y, out=None, name=None): ...@@ -12265,9 +12255,6 @@ def logical_xor(x, y, out=None, name=None):
@templatedoc() @templatedoc()
def logical_not(x, out=None, name=None): def logical_not(x, out=None, name=None):
""" """
:alias_main: paddle.logical_not
:alias: paddle.logical_not, paddle.tensor.logical_not, paddle.tensor.logic.logical_not
:old_api: paddle.fluid.layers.logical_not
``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``x`` and ``out`` are N-dim boolean ``Variable``. ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``x`` and ``out`` are N-dim boolean ``Variable``.
Each element of ``out`` is calculated by Each element of ``out`` is calculated by
...@@ -12277,21 +12264,21 @@ def logical_not(x, out=None, name=None): ...@@ -12277,21 +12264,21 @@ def logical_not(x, out=None, name=None):
out = !x out = !x
Args: Args:
x(${x_type}): ${x_comment}. x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool.
out(Variable): The ``Variable`` that specifies the output of the operator, which can be any ``Variable`` that has been created in the program. The default value is None, and a new ``Variable` will be created to save the output. out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`. name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns: Returns:
${out_type}: ${out_comment} Tensor: ${out_comment}
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([True, False, True, False]) x = paddle.to_tensor([True, False, True, False])
res = paddle.logical_not(x) res = paddle.logical_not(x)
print(res.numpy()) # [False True False True] print(res) # [False True False True]
""" """
return _logical_op( return _logical_op(
......
...@@ -91,11 +91,10 @@ Examples: ...@@ -91,11 +91,10 @@ Examples:
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.sigmoid(x) out = F.sigmoid(x)
print(out.numpy()) print(out)
# [0.40131234 0.450166 0.52497919 0.57444252] # [0.40131234 0.450166 0.52497919 0.57444252]
""") """)
...@@ -106,11 +105,10 @@ Examples: ...@@ -106,11 +105,10 @@ Examples:
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.log_sigmoid(x) out = F.log_sigmoid(x)
print(out.numpy()) print(out)
# [-0.91301525 -0.79813887 -0.64439666 -0.55435524] # [-0.91301525 -0.79813887 -0.64439666 -0.55435524]
""") """)
...@@ -120,11 +118,10 @@ Examples: ...@@ -120,11 +118,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.exp(x) out = paddle.exp(x)
print(out.numpy()) print(out)
# [0.67032005 0.81873075 1.10517092 1.34985881] # [0.67032005 0.81873075 1.10517092 1.34985881]
""") """)
...@@ -134,11 +131,10 @@ Examples: ...@@ -134,11 +131,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.tanh(x) out = paddle.tanh(x)
print(out.numpy()) print(out)
# [-0.37994896 -0.19737532 0.09966799 0.29131261] # [-0.37994896 -0.19737532 0.09966799 0.29131261]
""") """)
...@@ -148,11 +144,10 @@ Examples: ...@@ -148,11 +144,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.atan(x) out = paddle.atan(x)
print(out.numpy()) print(out)
# [-0.38050638 -0.19739556 0.09966865 0.29145679] # [-0.38050638 -0.19739556 0.09966865 0.29145679]
""") """)
...@@ -164,8 +159,6 @@ Examples: ...@@ -164,8 +159,6 @@ Examples:
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739] out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
...@@ -176,11 +169,10 @@ Examples: ...@@ -176,11 +169,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4]) x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
out = paddle.sqrt(x) out = paddle.sqrt(x)
print(out.numpy()) print(out)
# [0.31622777 0.4472136 0.54772256 0.63245553] # [0.31622777 0.4472136 0.54772256 0.63245553]
""") """)
...@@ -202,11 +194,10 @@ Examples: ...@@ -202,11 +194,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.abs(x) out = paddle.abs(x)
print(out.numpy()) print(out)
# [0.4 0.2 0.1 0.3] # [0.4 0.2 0.1 0.3]
""") """)
...@@ -216,11 +207,10 @@ Examples: ...@@ -216,11 +207,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.ceil(x) out = paddle.ceil(x)
print(out.numpy()) print(out)
# [-0. -0. 1. 1.] # [-0. -0. 1. 1.]
""") """)
...@@ -230,11 +220,10 @@ Examples: ...@@ -230,11 +220,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.floor(x) out = paddle.floor(x)
print(out.numpy()) print(out)
# [-1. -1. 0. 0.] # [-1. -1. 0. 0.]
""") """)
...@@ -244,11 +233,10 @@ Examples: ...@@ -244,11 +233,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.cos(x) out = paddle.cos(x)
print(out.numpy()) print(out)
# [0.92106099 0.98006658 0.99500417 0.95533649] # [0.92106099 0.98006658 0.99500417 0.95533649]
""") """)
...@@ -258,11 +246,10 @@ Examples: ...@@ -258,11 +246,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.acos(x) out = paddle.acos(x)
print(out.numpy()) print(out)
# [1.98231317 1.77215425 1.47062891 1.26610367] # [1.98231317 1.77215425 1.47062891 1.26610367]
""") """)
...@@ -272,11 +259,10 @@ Examples: ...@@ -272,11 +259,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.sin(x) out = paddle.sin(x)
print(out.numpy()) print(out)
# [-0.38941834 -0.19866933 0.09983342 0.29552021] # [-0.38941834 -0.19866933 0.09983342 0.29552021]
""") """)
...@@ -286,11 +272,10 @@ Examples: ...@@ -286,11 +272,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.asin(x) out = paddle.asin(x)
print(out.numpy()) print(out)
# [-0.41151685 -0.20135792 0.10016742 0.30469265] # [-0.41151685 -0.20135792 0.10016742 0.30469265]
""") """)
...@@ -300,11 +285,10 @@ Examples: ...@@ -300,11 +285,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.cosh(x) out = paddle.cosh(x)
print(out.numpy()) print(out)
# [1.08107237 1.02006676 1.00500417 1.04533851] # [1.08107237 1.02006676 1.00500417 1.04533851]
""") """)
...@@ -314,11 +298,10 @@ Examples: ...@@ -314,11 +298,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.sinh(x) out = paddle.sinh(x)
print(out.numpy()) print(out)
# [-0.41075233 -0.201336 0.10016675 0.30452029] # [-0.41075233 -0.201336 0.10016675 0.30452029]
""") """)
...@@ -328,11 +311,10 @@ Examples: ...@@ -328,11 +311,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5]) x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])
out = paddle.round(x) out = paddle.round(x)
print(out.numpy()) print(out)
# [-1. -0. 1. 2.] # [-1. -0. 1. 2.]
""") """)
...@@ -342,11 +324,10 @@ Examples: ...@@ -342,11 +324,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.reciprocal(x) out = paddle.reciprocal(x)
print(out.numpy()) print(out)
# [-2.5 -5. 10. 3.33333333] # [-2.5 -5. 10. 3.33333333]
""") """)
...@@ -356,11 +337,10 @@ Examples: ...@@ -356,11 +337,10 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.square(x) out = paddle.square(x)
print(out.numpy()) print(out)
# [0.16 0.04 0.01 0.09] # [0.16 0.04 0.01 0.09]
""") """)
...@@ -372,8 +352,6 @@ Examples: ...@@ -372,8 +352,6 @@ Examples:
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355] out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
...@@ -386,8 +364,6 @@ Examples: ...@@ -386,8 +364,6 @@ Examples:
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769] out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
...@@ -722,9 +698,8 @@ Examples: ...@@ -722,9 +698,8 @@ Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.erf(x) out = paddle.erf(x)
print(out.numpy()) print(out)
# [-0.42839236 -0.22270259 0.11246292 0.32862676] # [-0.42839236 -0.22270259 0.11246292 0.32862676]
""" """
...@@ -1271,28 +1271,26 @@ def has_nan(x): ...@@ -1271,28 +1271,26 @@ def has_nan(x):
def isfinite(x): def isfinite(x):
""" """
:alias_main: paddle.isfinite
:alias: paddle.isfinite,paddle.tensor.isfinite,paddle.tensor.logic.isfinite
:old_api: paddle.fluid.layers.isfinite
Test if any of x contains an infinity/NAN number. If all the elements are finite, Test if any of x contains an infinity/NAN number. If all the elements are finite,
returns true, else false. returns true, else false.
Args: Args:
x(variable): The Tensor/LoDTensor to be checked. x(Tensor): The Tensor to be checked.
Returns: Returns:
Variable: The tensor variable storing the output, contains a bool value. Tensor: The tensor storing the output, contains a bool value.
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid import paddle
var = fluid.layers.data(name="data",
shape=(4, 6), x = paddle.rand(shape=[4, 6], dtype='float32')
dtype="float32") y = paddle.fluid.layers.isfinite(x)
out = fluid.layers.isfinite(var) print(y)
""" """
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"isfinite") "isfinite")
......
...@@ -120,11 +120,10 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean', ...@@ -120,11 +120,10 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
import paddle import paddle
paddle.disable_static()
input = paddle.to_tensor([0.5, 0.6, 0.7], 'float32') input = paddle.to_tensor([0.5, 0.6, 0.7], 'float32')
label = paddle.to_tensor([1.0, 0.0, 1.0], 'float32') label = paddle.to_tensor([1.0, 0.0, 1.0], 'float32')
output = paddle.nn.functional.binary_cross_entropy(input, label) output = paddle.nn.functional.binary_cross_entropy(input, label)
print(output.numpy()) # [0.65537095] print(output) # [0.65537095]
""" """
if reduction not in ['sum', 'mean', 'none']: if reduction not in ['sum', 'mean', 'none']:
...@@ -200,16 +199,16 @@ def binary_cross_entropy_with_logits(logit, ...@@ -200,16 +199,16 @@ def binary_cross_entropy_with_logits(logit,
.. math:: .. math::
Out = -Labels * \\log(\\sigma(Logit)) - (1 - Labels) * \\log(1 - \\sigma(Logit)) Out = -Labels * \\log(\\sigma(Logit)) - (1 - Labels) * \\log(1 - \\sigma(Logit))
We know that :math:`\\sigma(Logit) = \\frac{1}{1 + \\e^{-Logit}}`. By substituting this we get: We know that :math:`\\sigma(Logit) = \\frac{1}{1 + e^{-Logit}}`. By substituting this we get:
.. math:: .. math::
Out = Logit - Logit * Labels + \\log(1 + \\e^{-Logit}) Out = Logit - Logit * Labels + \\log(1 + e^{-Logit})
For stability and to prevent overflow of :math:`\\e^{-Logit}` when Logit < 0, For stability and to prevent overflow of :math:`e^{-Logit}` when Logit < 0,
we reformulate the loss as follows: we reformulate the loss as follows:
.. math:: .. math::
Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + \\e^{-\|Logit\|}) Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + e^{-\|Logit\|})
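As a quick sanity check of this reformulation, a plain-Python sketch (illustrative only, not code from this patch) shows the naive and the numerically stable forms agree:

.. code-block:: python

    import math

    def naive(logit, label):
        # -label*log(sigmoid(logit)) - (1 - label)*log(1 - sigmoid(logit))
        p = 1.0 / (1.0 + math.exp(-logit))
        return -label * math.log(p) - (1.0 - label) * math.log(1.0 - p)

    def stable(logit, label):
        # max(logit, 0) - logit*label + log(1 + exp(-|logit|))
        return max(logit, 0.0) - logit * label + math.log(1.0 + math.exp(-abs(logit)))

    for logit, label in [(5.0, 1.0), (1.0, 0.0), (-3.0, 1.0)]:
        print(round(naive(logit, label), 6), round(stable(logit, label), 6))
    # 0.006715 0.006715
    # 1.313262 1.313262
    # 3.048587 3.048587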
Then, if ``weight`` or ``pos_weight`` is not None, this operator multiplies the Then, if ``weight`` or ``pos_weight`` is not None, this operator multiplies the
weight tensor on the loss `Out`. The ``weight`` tensor will attach different weight tensor on the loss `Out`. The ``weight`` tensor will attach different
...@@ -254,11 +253,11 @@ def binary_cross_entropy_with_logits(logit, ...@@ -254,11 +253,11 @@ def binary_cross_entropy_with_logits(logit,
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
logit = paddle.to_tensor([5.0, 1.0, 3.0]) logit = paddle.to_tensor([5.0, 1.0, 3.0])
label = paddle.to_tensor([1.0, 0.0, 1.0]) label = paddle.to_tensor([1.0, 0.0, 1.0])
output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label) output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label)
print(output.numpy()) # [0.45618808] print(output) # [0.45618808]
""" """
if reduction not in ['sum', 'mean', 'none']: if reduction not in ['sum', 'mean', 'none']:
...@@ -577,13 +576,12 @@ def margin_ranking_loss(input, ...@@ -577,13 +576,12 @@ def margin_ranking_loss(input,
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32') other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32') label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
loss = paddle.nn.functional.margin_ranking_loss(input, other, label) loss = paddle.nn.functional.margin_ranking_loss(input, other, label)
print(loss.numpy()) # [0.75] print(loss) # [0.75]
""" """
if reduction not in ['sum', 'mean', 'none']: if reduction not in ['sum', 'mean', 'none']:
raise ValueError( raise ValueError(
...@@ -651,22 +649,22 @@ def l1_loss(input, label, reduction='mean', name=None): ...@@ -651,22 +649,22 @@ def l1_loss(input, label, reduction='mean', name=None):
If `reduction` set to ``'none'``, the loss is: If `reduction` set to ``'none'``, the loss is:
.. math:: .. math::
Out = \lvert input - label\rvert Out = \\lvert input - label \\rvert
If `reduction` set to ``'mean'``, the loss is: If `reduction` set to ``'mean'``, the loss is:
.. math:: .. math::
Out = MEAN(\lvert input - label\rvert) Out = MEAN(\\lvert input - label \\rvert)
If `reduction` set to ``'sum'``, the loss is: If `reduction` set to ``'sum'``, the loss is:
.. math:: .. math::
Out = SUM(\lvert input - label\rvert) Out = SUM(\\lvert input - label\\rvert)
Parameters: Parameters:
input (Tensor): The input tensor. The shapes is [N, *], where N is batch size and `*` means any number of additional dimensions. It's data type should be float32, float64, int32, int64. input (Tensor): The input tensor. The shape is [N, `*`], where N is batch size and `*` means any number of additional dimensions. Its data type should be float32, float64, int32, int64.
label (Tensor): label. The shapes is [N, *], same shape as ``input`` . It's data type should be float32, float64, int32, int64. label (Tensor): label. The shape is [N, `*`], same shape as ``input`` . Its data type should be float32, float64, int32, int64.
reduction (str, optional): Indicate the reduction to apply to the loss, reduction (str, optional): Indicate the reduction to apply to the loss,
the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
If `reduction` is ``'none'``, the unreduced loss is returned; If `reduction` is ``'none'``, the unreduced loss is returned;
...@@ -674,12 +672,15 @@ def l1_loss(input, label, reduction='mean', name=None): ...@@ -674,12 +672,15 @@ def l1_loss(input, label, reduction='mean', name=None):
If `reduction` is ``'sum'``, the reduced sum loss is returned. If `reduction` is ``'sum'``, the reduced sum loss is returned.
Default is ``'mean'``. Default is ``'mean'``.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns: Returns:
Tensor, the L1 Loss of Tensor ``input`` and ``label``. Tensor, the L1 Loss of Tensor ``input`` and ``label``.
If `reduction` is ``'none'``, the shape of output loss is [N, *], the same as ``input`` . If `reduction` is ``'none'``, the shape of output loss is [N, *], the same as ``input`` .
If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1]. If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1].
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static() paddle.disable_static()
......
...@@ -40,15 +40,15 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None): ...@@ -40,15 +40,15 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
.. math:: .. math::
y = \frac{x}{ \max\left( \lvert \lvert x \rvert \rvert_p, epsilon\right) } y = \\frac{x}{ \\max\\left( \\lvert \\lvert x \\rvert \\rvert_p, epsilon\\right) }
.. math:: .. math::
\lvert \lvert x \rvert \rvert_p = \left(\sum_i {\lvert x_i\rvert^p} \right)^{1/p} \\lvert \\lvert x \\rvert \\rvert_p = \\left( \\sum_i {\\lvert x_i \\rvert^p} \\right)^{1/p}
where, :math:`\sum_i{\lvert x_i\rvert^p}` is calculated along the ``axis`` dimension. where, :math:`\\sum_i{\\lvert x_i \\rvert^p}` is calculated along the ``axis`` dimension.
Args: Parameters:
x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64.
p (float|int, optional): The exponent value in the norm formulation. Default: 2 p (float|int, optional): The exponent value in the norm formulation. Default: 2
axis (int, optional): The axis on which to apply normalization. If `axis < 0`, the dimension to normalization is `x.ndim + axis`. -1 is the last dimension. axis (int, optional): The axis on which to apply normalization. If `axis < 0`, the dimension to normalization is `x.ndim + axis`. -1 is the last dimension.
......
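A short usage sketch of the normalize formula above, assuming a PaddlePaddle 2.0 install; the values are easy to verify by hand:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    # Each row is divided by max(||row||_2, epsilon).
    x = paddle.to_tensor([[3.0, 4.0], [0.0, 0.0]])
    print(F.normalize(x, p=2, axis=1))
    # [[0.6, 0.8],   first row has L2 norm 5
    #  [0. , 0. ]]   the all-zero row stays at zero thanks to epsilon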
...@@ -838,9 +838,13 @@ class MarginRankingLoss(fluid.dygraph.Layer): ...@@ -838,9 +838,13 @@ class MarginRankingLoss(fluid.dygraph.Layer):
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Shape: Shape:
input: N-D Tensor, the shape is [N, *], N is batch size and `*` means any number of additional dimensions., available dtype is float32, float64.
input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64.
other: N-D Tensor, `other` has the same shape and dtype as `input`. other: N-D Tensor, `other` has the same shape and dtype as `input`.
label: N-D Tensor, label has the same shape and dtype as `input`. label: N-D Tensor, label has the same shape and dtype as `input`.
output: If :attr:`reduction` is ``'mean'`` or ``'sum'`` , the out shape is :math:`[1]`, otherwise the shape is the same as `input`. The same dtype as the input tensor. output: If :attr:`reduction` is ``'mean'`` or ``'sum'`` , the out shape is :math:`[1]`, otherwise the shape is the same as `input`. The same dtype as the input tensor.
Returns: Returns:
...@@ -851,14 +855,13 @@ class MarginRankingLoss(fluid.dygraph.Layer): ...@@ -851,14 +855,13 @@ class MarginRankingLoss(fluid.dygraph.Layer):
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32") input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32") other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32")
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32") label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
margin_rank_loss = paddle.nn.MarginRankingLoss() margin_rank_loss = paddle.nn.MarginRankingLoss()
loss = margin_rank_loss(input, other, label) loss = margin_rank_loss(input, other, label)
print(loss.numpy()) # [0.75] print(loss) # [0.75]
""" """
def __init__(self, margin=0.0, reduction='mean', name=None): def __init__(self, margin=0.0, reduction='mean', name=None):
......
...@@ -54,9 +54,6 @@ __all__ = [ ...@@ -54,9 +54,6 @@ __all__ = [
def equal_all(x, y, name=None): def equal_all(x, y, name=None):
""" """
:alias_main: paddle.equal_all
:alias: paddle.equal_all,paddle.tensor.equal_all,paddle.tensor.logic.equal_all
This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise. This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise.
**NOTICE**: The output of this OP has no gradient. **NOTICE**: The output of this OP has no gradient.
...@@ -75,14 +72,13 @@ def equal_all(x, y, name=None): ...@@ -75,14 +72,13 @@ def equal_all(x, y, name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3]) x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 2, 3]) y = paddle.to_tensor([1, 2, 3])
z = paddle.to_tensor([1, 4, 3]) z = paddle.to_tensor([1, 4, 3])
result1 = paddle.equal_all(x, y) result1 = paddle.equal_all(x, y)
print(result1.numpy()) # result1 = [True ] print(result1) # result1 = [True ]
result2 = paddle.equal_all(x, z) result2 = paddle.equal_all(x, z)
print(result2.numpy()) # result2 = [False ] print(result2) # result2 = [False ]
""" """
helper = LayerHelper("equal_all", **locals()) helper = LayerHelper("equal_all", **locals())
...@@ -122,8 +118,6 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): ...@@ -122,8 +118,6 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([10000., 1e-07]) x = paddle.to_tensor([10000., 1e-07])
y = paddle.to_tensor([10000.1, 1e-08]) y = paddle.to_tensor([10000.1, 1e-08])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
...@@ -189,10 +183,9 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): ...@@ -189,10 +183,9 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
@templatedoc() @templatedoc()
def equal(x, y, name=None): def equal(x, y, name=None):
""" """
:alias_main: paddle.equal
:alias: paddle.equal,paddle.tensor.equal,paddle.tensor.logic.equal
This layer returns the truth value of :math:`x == y` elementwise. This layer returns the truth value of :math:`x == y` elementwise.
**NOTICE**: The output of this OP has no gradient. **NOTICE**: The output of this OP has no gradient.
Args: Args:
...@@ -210,11 +203,10 @@ def equal(x, y, name=None): ...@@ -210,11 +203,10 @@ def equal(x, y, name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3]) x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2]) y = paddle.to_tensor([1, 3, 2])
result1 = paddle.equal(x, y) result1 = paddle.equal(x, y)
print(result1.numpy()) # result1 = [True False False] print(result1) # result1 = [True False False]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return core.ops.equal(x, y) return core.ops.equal(x, y)
...@@ -236,10 +228,8 @@ def equal(x, y, name=None): ...@@ -236,10 +228,8 @@ def equal(x, y, name=None):
@templatedoc() @templatedoc()
def greater_equal(x, y, name=None): def greater_equal(x, y, name=None):
""" """
:alias_main: paddle.greater_equal
:alias: paddle.greater_equal,paddle.tensor.greater_equal,paddle.tensor.logic.greater_equal
This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent to the overloaded operator `>=`. This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent to the overloaded operator `>=`.
**NOTICE**: The output of this OP has no gradient. **NOTICE**: The output of this OP has no gradient.
Args: Args:
...@@ -252,13 +242,13 @@ def greater_equal(x, y, name=None): ...@@ -252,13 +242,13 @@ def greater_equal(x, y, name=None):
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3]) x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2]) y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_equal(x, y) result1 = paddle.greater_equal(x, y)
print(result1.numpy()) # result1 = [True False True] print(result1) # result1 = [True False True]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return core.ops.greater_equal(x, y) return core.ops.greater_equal(x, y)
...@@ -282,10 +272,8 @@ def greater_equal(x, y, name=None): ...@@ -282,10 +272,8 @@ def greater_equal(x, y, name=None):
@templatedoc() @templatedoc()
def greater_than(x, y, name=None): def greater_than(x, y, name=None):
""" """
:alias_main: paddle.greater_than
:alias: paddle.greater_than,paddle.tensor.greater_than,paddle.tensor.logic.greater_than
This OP returns the truth value of :math:`x > y` elementwise, which is equivalent to the overloaded operator `>`. This OP returns the truth value of :math:`x > y` elementwise, which is equivalent to the overloaded operator `>`.
**NOTICE**: The output of this OP has no gradient. **NOTICE**: The output of this OP has no gradient.
Args: Args:
...@@ -298,13 +286,13 @@ def greater_than(x, y, name=None): ...@@ -298,13 +286,13 @@ def greater_than(x, y, name=None):
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3]) x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2]) y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_than(x, y) result1 = paddle.greater_than(x, y)
print(result1.numpy()) # result1 = [False False True] print(result1) # result1 = [False False True]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return core.ops.greater_than(x, y) return core.ops.greater_than(x, y)
...@@ -328,10 +316,8 @@ def greater_than(x, y, name=None): ...@@ -328,10 +316,8 @@ def greater_than(x, y, name=None):
@templatedoc() @templatedoc()
def less_equal(x, y, name=None): def less_equal(x, y, name=None):
""" """
:alias_main: paddle.less_equal
:alias: paddle.less_equal,paddle.tensor.less_equal,paddle.tensor.logic.less_equal
This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent to the overloaded operator `<=`. This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent to the overloaded operator `<=`.
**NOTICE**: The output of this OP has no gradient. **NOTICE**: The output of this OP has no gradient.
Args: Args:
...@@ -345,13 +331,13 @@ def less_equal(x, y, name=None): ...@@ -345,13 +331,13 @@ def less_equal(x, y, name=None):
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3]) x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2]) y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_equal(x, y) result1 = paddle.less_equal(x, y)
print(result1.numpy()) # result1 = [True True False] print(result1) # result1 = [True True False]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return core.ops.less_equal(x, y) return core.ops.less_equal(x, y)
...@@ -373,10 +359,8 @@ def less_equal(x, y, name=None): ...@@ -373,10 +359,8 @@ def less_equal(x, y, name=None):
@templatedoc() @templatedoc()
def less_than(x, y, name=None): def less_than(x, y, name=None):
""" """
:alias_main: paddle.less_than
:alias: paddle.less_than,paddle.tensor.less_than,paddle.tensor.logic.less_than
This OP returns the truth value of :math:`x < y` elementwise, which is equivalent to the overloaded operator `<`. This OP returns the truth value of :math:`x < y` elementwise, which is equivalent to the overloaded operator `<`.
**NOTICE**: The output of this OP has no gradient. **NOTICE**: The output of this OP has no gradient.
Args: Args:
...@@ -390,13 +374,13 @@ def less_than(x, y, name=None): ...@@ -390,13 +374,13 @@ def less_than(x, y, name=None):
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3]) x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2]) y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_than(x, y) result1 = paddle.less_than(x, y)
print(result1.numpy()) # result1 = [False True False] print(result1) # result1 = [False True False]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return core.ops.less_than(x, y) return core.ops.less_than(x, y)
...@@ -418,10 +402,8 @@ def less_than(x, y, name=None): ...@@ -418,10 +402,8 @@ def less_than(x, y, name=None):
@templatedoc() @templatedoc()
def not_equal(x, y, name=None): def not_equal(x, y, name=None):
""" """
:alias_main: paddle.not_equal
:alias: paddle.not_equal,paddle.tensor.not_equal,paddle.tensor.logic.not_equal
This OP returns the truth value of :math:`x != y` elementwise, which is equivalent to the overloaded operator `!=`. This OP returns the truth value of :math:`x != y` elementwise, which is equivalent to the overloaded operator `!=`.
**NOTICE**: The output of this OP has no gradient. **NOTICE**: The output of this OP has no gradient.
Args: Args:
...@@ -438,11 +420,10 @@ def not_equal(x, y, name=None): ...@@ -438,11 +420,10 @@ def not_equal(x, y, name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3]) x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2]) y = paddle.to_tensor([1, 3, 2])
result1 = paddle.not_equal(x, y) result1 = paddle.not_equal(x, y)
print(result1.numpy()) # result1 = [False True True] print(result1) # result1 = [False True True]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return core.ops.not_equal(x, y) return core.ops.not_equal(x, y)
......
...@@ -166,8 +166,6 @@ def pow(x, y, name=None): ...@@ -166,8 +166,6 @@ def pow(x, y, name=None):
import paddle import paddle
paddle.disable_static()
# example 1: y is a float # example 1: y is a float
x = paddle.to_tensor([1, 2, 3]) x = paddle.to_tensor([1, 2, 3])
y = 2 y = 2
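The hunk cuts example 1 off right after ``y = 2``; a minimal sketch of how it presumably continues (result hand-computed, not taken from the source):

.. code-block:: python

    # continuing example 1 above: raise each element of x to the power y
    res = paddle.pow(x, y)
    print(res)  # [1, 4, 9]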
...@@ -474,32 +472,30 @@ Examples: ...@@ -474,32 +472,30 @@ Examples:
import paddle import paddle
import numpy as np import numpy as np
paddle.disable_static()
x = paddle.to_tensor([[1, 2], [3, 4]]) x = paddle.to_tensor([[1, 2], [3, 4]])
y = paddle.to_tensor([[5, 6], [7, 8]]) y = paddle.to_tensor([[5, 6], [7, 8]])
res = paddle.maximum(x, y) res = paddle.maximum(x, y)
print(res.numpy()) print(res)
#[[5. 6.] #[[5. 6.]
# [7. 8.]] # [7. 8.]]
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]]) x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([1, 2]) y = paddle.to_tensor([1, 2])
res = paddle.maximum(x, y, axis=1) res = paddle.maximum(x, y, axis=1)
print(res.numpy()) print(res)
#[[[1. 2. 3.] #[[[1. 2. 3.]
# [2. 2. 3.]]] # [2. 2. 3.]]]
x = paddle.to_tensor([2, 3, 5], dtype='float32') x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, 4, np.nan], dtype='float32') y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
res = paddle.maximum(x, y) res = paddle.maximum(x, y)
print(res.numpy()) print(res)
#[ 2. 4. nan] #[ 2. 4. nan]
x = paddle.to_tensor([5, 3, np.inf], dtype='float32') x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, 4, 5], dtype='float32') y = paddle.to_tensor([1, 4, 5], dtype='float32')
res = paddle.maximum(x, y) res = paddle.maximum(x, y)
print(res.numpy()) print(res)
#[ 5. 4. inf] #[ 5. 4. inf]
""" """
op_type = 'elementwise_max' op_type = 'elementwise_max'
...@@ -518,32 +514,30 @@ Examples: ...@@ -518,32 +514,30 @@ Examples:
import paddle import paddle
import numpy as np import numpy as np
paddle.disable_static()
x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
y = paddle.to_tensor([[5, 6], [7, 8]], dtype='float32') y = paddle.to_tensor([[5, 6], [7, 8]], dtype='float32')
res = paddle.minimum(x, y) res = paddle.minimum(x, y)
print(res.numpy()) print(res)
#[[1. 2.] #[[1. 2.]
# [3. 4.]] # [3. 4.]]
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]], dtype='float32') x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]], dtype='float32')
y = paddle.to_tensor([1, 2], dtype='float32') y = paddle.to_tensor([1, 2], dtype='float32')
res = paddle.minimum(x, y, axis=1) res = paddle.minimum(x, y, axis=1)
print(res.numpy()) print(res)
#[[[1. 1. 1.] #[[[1. 1. 1.]
# [2. 2. 2.]]] # [2. 2. 2.]]]
x = paddle.to_tensor([2, 3, 5], dtype='float32') x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, 4, np.nan], dtype='float32') y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
res = paddle.minimum(x, y) res = paddle.minimum(x, y)
print(res.numpy()) print(res)
#[ 1. 3. nan] #[ 1. 3. nan]
x = paddle.to_tensor([5, 3, np.inf], dtype='float32') x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, 4, 5], dtype='float32') y = paddle.to_tensor([1, 4, 5], dtype='float32')
res = paddle.minimum(x, y) res = paddle.minimum(x, y)
print(res.numpy()) print(res)
#[1. 3. 5.] #[1. 3. 5.]
""" """
op_type = 'elementwise_min' op_type = 'elementwise_min'
...@@ -584,7 +578,7 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None): ...@@ -584,7 +578,7 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
x (Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64. x (Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64.
axis (int|list|tuple, optional): The dimensions along which the sum is performed. If axis (int|list|tuple, optional): The dimensions along which the sum is performed. If
:attr:`None`, sum all elements of :attr:`x` and return a :attr:`None`, sum all elements of :attr:`x` and return a
Tensor variable with a single element, otherwise must be in the Tensor with a single element, otherwise must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`, range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`. the dimension to reduce is :math:`rank + axis[i]`.
dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype
...@@ -785,8 +779,6 @@ def add_n(inputs, name=None): ...@@ -785,8 +779,6 @@ def add_n(inputs, name=None):
def mm(input, mat2, name=None): def mm(input, mat2, name=None):
""" """
:alias_main: paddle.mm
:alias: paddle.mm,paddle.tensor.mm,paddle.tensor.math.mm
Applies matrix multiplication to two tensors. Applies matrix multiplication to two tensors.
...@@ -799,41 +791,42 @@ def mm(input, mat2, name=None): ...@@ -799,41 +791,42 @@ def mm(input, mat2, name=None):
removed after matrix multiplication. removed after matrix multiplication.
Args: Args:
x (Variable): The input variable which is a Tensor or LoDTensor. x (Tensor): The input Tensor.
mat2 (Variable): The input variable which is a Tensor or LoDTensor. mat2 (Tensor): The input Tensor.
name(str, optional): The default value is None. Normally there is no need for name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns: Returns:
Variable: The product Tensor (or LoDTensor) variable. Tensor: The product Tensor.
Examples: Examples:
.. code-block:: python .. code-block:: python
# Examples to clarify shapes of the inputs and output # Examples to clarify shapes of the inputs and output
# x: [B, ..., M, K], mat2: [B, ..., K, N] # x: [B, ..., M, K], mat2: [B, ..., K, N]
# fluid.layers.matmul(x, mat2) # out: [B, ..., M, N] # paddle.matmul(x, mat2) # out: [B, ..., M, N]
# x: [B, M, K], mat2: [B, K, N] # x: [B, M, K], mat2: [B, K, N]
# fluid.layers.matmul(x, mat2) # out: [B, M, N] # paddle.matmul(x, mat2) # out: [B, M, N]
# x: [B, M, K], mat2: [K, N] # x: [B, M, K], mat2: [K, N]
# fluid.layers.matmul(x, mat2) # out: [B, M, N] # paddle.matmul(x, mat2) # out: [B, M, N]
# x: [M, K], mat2: [K, N] # x: [M, K], mat2: [K, N]
# fluid.layers.matmul(x, mat2) # out: [M, N] # paddle.matmul(x, mat2) # out: [M, N]
# x: [B, M, K], mat2: [K] # x: [B, M, K], mat2: [K]
# fluid.layers.matmul(x, mat2) # out: [B, M] # paddle.matmul(x, mat2) # out: [B, M]
# x: [K], mat2: [K] # x: [K], mat2: [K]
# fluid.layers.matmul(x, mat2) # out: [1] # paddle.matmul(x, mat2) # out: [1]
import paddle import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2, 3], dtype='float32') x = paddle.rand(shape=[2, 3], dtype='float32')
mat2 = fluid.data(name='mat2', shape=[3, 2], dtype='float32') y = paddle.rand(shape=[3, 2], dtype='float32')
out = paddle.mm(x, mat2) # out shape is [2, 2] out = paddle.mm(x, y)
print(out.shape) # [2, 2]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
out = _varbase_creator(dtype=input.dtype) out = _varbase_creator(dtype=input.dtype)
...@@ -921,7 +914,7 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None): ...@@ -921,7 +914,7 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
out = paddle.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 ) out = paddle.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )
print( out.numpy() ) print(out)
# [[10.5 10.5] # [[10.5 10.5]
# [10.5 10.5]] # [10.5 10.5]]
""" """
...@@ -1085,7 +1078,7 @@ def max(x, axis=None, keepdim=False, name=None): ...@@ -1085,7 +1078,7 @@ def max(x, axis=None, keepdim=False, name=None):
float64, int32, int64. float64, int32, int64.
axis(list|int, optional): The axis along which the maximum is computed. axis(list|int, optional): The axis along which the maximum is computed.
If :attr:`None`, compute the maximum over all elements of If :attr:`None`, compute the maximum over all elements of
`x` and return a Tensor variable with a single element, `x` and return a Tensor with a single element,
otherwise must be in the range :math:`[-x.ndim, x.ndim)`. otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`. If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
keepdim(bool, optional): Whether to keep the reduced dimension in the keepdim(bool, optional): Whether to keep the reduced dimension in the
...@@ -1104,37 +1097,35 @@ def max(x, axis=None, keepdim=False, name=None): ...@@ -1104,37 +1097,35 @@ def max(x, axis=None, keepdim=False, name=None):
import paddle import paddle
paddle.disable_static() # data_x is a Tensor with shape [2, 4]
# data_x is a variable with shape [2, 4]
# the axis is a int element # the axis is a int element
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]]) [0.1, 0.2, 0.6, 0.7]])
result1 = paddle.max(x) result1 = paddle.max(x)
print(result1.numpy()) print(result1)
#[0.9] #[0.9]
result2 = paddle.max(x, axis=0) result2 = paddle.max(x, axis=0)
print(result2.numpy()) print(result2)
#[0.2 0.3 0.6 0.9] #[0.2 0.3 0.6 0.9]
result3 = paddle.max(x, axis=-1) result3 = paddle.max(x, axis=-1)
print(result3.numpy()) print(result3)
#[0.9 0.7] #[0.9 0.7]
result4 = paddle.max(x, axis=1, keepdim=True) result4 = paddle.max(x, axis=1, keepdim=True)
print(result4.numpy()) print(result4)
#[[0.9] #[[0.9]
# [0.7]] # [0.7]]
# data_y is a variable with shape [2, 2, 2] # data_y is a Tensor with shape [2, 2, 2]
# the axis is list # the axis is list
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]]) [[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.max(y, axis=[1, 2]) result5 = paddle.max(y, axis=[1, 2])
print(result5.numpy()) print(result5)
#[4. 8.] #[4. 8.]
result6 = paddle.max(y, axis=[0, 1]) result6 = paddle.max(y, axis=[0, 1])
print(result6.numpy()) print(result6)
#[7. 8.] #[7. 8.]
""" """
...@@ -1179,7 +1170,7 @@ def min(x, axis=None, keepdim=False, name=None): ...@@ -1179,7 +1170,7 @@ def min(x, axis=None, keepdim=False, name=None):
x(Tensor): A tensor, the data type is float32, float64, int32, int64. x(Tensor): A tensor, the data type is float32, float64, int32, int64.
axis(list|int, optional): The axis along which the minimum is computed. axis(list|int, optional): The axis along which the minimum is computed.
If :attr:`None`, compute the minimum over all elements of If :attr:`None`, compute the minimum over all elements of
`x` and return a Tensor variable with a single element, `x` and return a Tensor with a single element,
otherwise must be in the range :math:`[-x.ndim, x.ndim)`. otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`. If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
keepdim(bool, optional): Whether to keep the reduced dimension in the keepdim(bool, optional): Whether to keep the reduced dimension in the
...@@ -1198,35 +1189,33 @@ def min(x, axis=None, keepdim=False, name=None): ...@@ -1198,35 +1189,33 @@ def min(x, axis=None, keepdim=False, name=None):
import paddle import paddle
paddle.disable_static()
# x is a tensor with shape [2, 4] # x is a tensor with shape [2, 4]
# the axis is a int element # the axis is a int element
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]]) [0.1, 0.2, 0.6, 0.7]])
result1 = paddle.min(x) result1 = paddle.min(x)
print(result1.numpy()) print(result1)
#[0.1] #[0.1]
result2 = paddle.min(x, axis=0) result2 = paddle.min(x, axis=0)
print(result2.numpy()) print(result2)
#[0.1 0.2 0.5 0.7] #[0.1 0.2 0.5 0.7]
result3 = paddle.min(x, axis=-1) result3 = paddle.min(x, axis=-1)
print(result3.numpy()) print(result3)
#[0.2 0.1] #[0.2 0.1]
result4 = paddle.min(x, axis=1, keepdim=True) result4 = paddle.min(x, axis=1, keepdim=True)
print(result4.numpy()) print(result4)
#[[0.2] #[[0.2]
# [0.1]] # [0.1]]
# y is a variable with shape [2, 2, 2] # y is a Tensor with shape [2, 2, 2]
# the axis is list # the axis is list
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]]) [[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.min(y, axis=[1, 2]) result5 = paddle.min(y, axis=[1, 2])
print(result5.numpy()) print(result5)
#[1. 5.] #[1. 5.]
result6 = paddle.min(y, axis=[0, 1]) result6 = paddle.min(y, axis=[0, 1])
print(result6.numpy()) print(result6)
#[1. 2.] #[1. 2.]
""" """
...@@ -1265,6 +1254,7 @@ def min(x, axis=None, keepdim=False, name=None): ...@@ -1265,6 +1254,7 @@ def min(x, axis=None, keepdim=False, name=None):
def log1p(x, name=None): def log1p(x, name=None):
r""" r"""
Calculates the natural log of the given input tensor, element-wise. Calculates the natural log of the given input tensor, element-wise.
.. math:: .. math::
Out = \\ln(x+1) Out = \\ln(x+1)
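No example for log1p is visible in this hunk; as an illustration of the formula above, a minimal sketch (values hand-computed, assuming the paddle.log1p API of paddle 2.x):

.. code-block:: python

    import paddle

    x = paddle.to_tensor([0.0, 1.0, 2.0])
    out = paddle.log1p(x)  # computes ln(x + 1) element-wise
    print(out)  # approximately [0., 0.6931472, 1.0986123]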
...@@ -1423,7 +1413,7 @@ def addcmul(input, tensor1, tensor2, value=1.0, name=None): ...@@ -1423,7 +1413,7 @@ def addcmul(input, tensor1, tensor2, value=1.0, name=None):
tensor1 = paddle.ones([2,2]) tensor1 = paddle.ones([2,2])
tensor2 = paddle.ones([2,2]) tensor2 = paddle.ones([2,2])
out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=0.5) out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=0.5)
print(out.numpy()) print(out)
# [[1.5 1.5] # [[1.5 1.5]
# [1.5 1.5]] # [1.5 1.5]]
""" """
...@@ -1442,8 +1432,6 @@ def addcmul(input, tensor1, tensor2, value=1.0, name=None): ...@@ -1442,8 +1432,6 @@ def addcmul(input, tensor1, tensor2, value=1.0, name=None):
def clip(x, min=None, max=None, name=None): def clip(x, min=None, max=None, name=None):
""" """
**clip layer**
This operator clips all elements in input into the range [ min, max ] and returns This operator clips all elements in input into the range [ min, max ] and returns
a resulting tensor as the following equation: a resulting tensor as the following equation:
...@@ -1468,6 +1456,7 @@ def clip(x, min=None, max=None, name=None): ...@@ -1468,6 +1456,7 @@ def clip(x, min=None, max=None, name=None):
.. code-block:: python .. code-block:: python
import paddle import paddle
x1 = paddle.to_tensor([[1.2, 3.5], [4.5, 6.4]], 'float32') x1 = paddle.to_tensor([[1.2, 3.5], [4.5, 6.4]], 'float32')
out1 = paddle.clip(x1, min=3.5, max=5.0) out1 = paddle.clip(x1, min=3.5, max=5.0)
out2 = paddle.clip(x1, min=2.5) out2 = paddle.clip(x1, min=2.5)
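For reference, the expected results of the two clip calls above, hand-computed from the shown inputs (not printed in the diff):

.. code-block:: python

    print(out1)  # [[3.5, 3.5], [4.5, 5.0]]  -- every element clipped into [3.5, 5.0]
    print(out2)  # [[2.5, 3.5], [4.5, 6.4]]  -- only a lower bound of 2.5 is applied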
...@@ -1626,9 +1615,9 @@ def kron(x, y, name=None): ...@@ -1626,9 +1615,9 @@ def kron(x, y, name=None):
${comment} ${comment}
Args: Args:
x (Variable): the first operand of kron op, data type: float16, float32, x (Tensor): the first operand of kron op, data type: float16, float32,
float64, int32 or int64. float64, int32 or int64.
y (Variable): the second operand of kron op, data type: float16, y (Tensor): the second operand of kron op, data type: float16,
float32, float64, int32 or int64. Its data type should be the same float32, float64, int32 or int64. Its data type should be the same
as x. as x.
name(str, optional): The default value is None. Normally there is no name(str, optional): The default value is None. Normally there is no
...@@ -1636,7 +1625,7 @@ ${comment} ...@@ -1636,7 +1625,7 @@ ${comment}
refer to :ref:`api_guide_Name`. refer to :ref:`api_guide_Name`.
Returns: Returns:
Variable: The output of kron op, data type: float16, float32, float64, int32 or int64. Its data type is the same as that of x. Tensor: The output of kron op, data type: float16, float32, float64, int32 or int64. Its data type is the same as that of x.
Examples: Examples:
.. code-block:: python .. code-block:: python
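The kron docstring's own example is cut off by this hunk; purely as an illustration of the Kronecker product described above (values hand-computed, not from the source):

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1, 2], [3, 4]], dtype='int64')
    y = paddle.to_tensor([[1, 0], [0, 1]], dtype='int64')
    out = paddle.kron(x, y)  # each x[i, j] scales a copy of y
    print(out)
    # [[1, 0, 2, 0],
    #  [0, 1, 0, 2],
    #  [3, 0, 4, 0],
    #  [0, 3, 0, 4]]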
...@@ -1755,10 +1744,10 @@ def isfinite(x, name=None): ...@@ -1755,10 +1744,10 @@ def isfinite(x, name=None):
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isfinite(x) out = paddle.tensor.isfinite(x)
print(out.numpy()) # [False True True False True False False] print(out) # [False True True False True False False]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return core.ops.isfinite_v2(x) return core.ops.isfinite_v2(x)
...@@ -1784,10 +1773,9 @@ def isinf(x, name=None): ...@@ -1784,10 +1773,9 @@ def isinf(x, name=None):
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isinf(x) out = paddle.tensor.isinf(x)
print(out.numpy()) # [ True False False True False False False] print(out) # [ True False False True False False False]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return core.ops.isinf_v2(x) return core.ops.isinf_v2(x)
...@@ -1813,10 +1801,9 @@ def isnan(x, name=None): ...@@ -1813,10 +1801,9 @@ def isnan(x, name=None):
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isnan(x) out = paddle.tensor.isnan(x)
print(out.numpy()) # [False False False False False True True] print(out) # [False False False False False True True]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return core.ops.isnan_v2(x) return core.ops.isnan_v2(x)
...@@ -1947,10 +1934,9 @@ def tanh(x, name=None): ...@@ -1947,10 +1934,9 @@ def tanh(x, name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.tanh(x) out = paddle.tanh(x)
print(out.numpy()) print(out)
# [-0.37994896 -0.19737532 0.09966799 0.29131261] # [-0.37994896 -0.19737532 0.09966799 0.29131261]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
...@@ -2008,7 +1994,7 @@ def all(x, axis=None, keepdim=False, name=None): ...@@ -2008,7 +1994,7 @@ def all(x, axis=None, keepdim=False, name=None):
x (Tensor): An N-D Tensor, the input data type should be `bool`. x (Tensor): An N-D Tensor, the input data type should be `bool`.
axis (int|list|tuple, optional): The dimensions along which the ``logical and`` is computed. If axis (int|list|tuple, optional): The dimensions along which the ``logical and`` is computed. If
:attr:`None`, compute the ``logical and`` over all elements of :attr:`x` and return a :attr:`None`, compute the ``logical and`` over all elements of :attr:`x` and return a
Tensor variable with a single element, otherwise must be in the Tensor with a single element, otherwise must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`, range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`. the dimension to reduce is :math:`rank + axis[i]`.
keepdim (bool, optional): Whether to keep the reduced dimension in the keepdim (bool, optional): Whether to keep the reduced dimension in the
...@@ -2033,10 +2019,7 @@ def all(x, axis=None, keepdim=False, name=None): ...@@ -2033,10 +2019,7 @@ def all(x, axis=None, keepdim=False, name=None):
import paddle.fluid.layers as layers import paddle.fluid.layers as layers
import numpy as np import numpy as np
# set as static mode # x is a bool Tensor with following elements:
paddle.disable_static()
# x is a bool Tensor variable with following elements:
# [[True, False] # [[True, False]
# [True, True]] # [True, True]]
x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32')) x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
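The example above still uses the fluid.layers API; for comparison, a minimal paddle-2.x style sketch of the same reduction semantics (illustrative only, not part of this change):

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1, 0], [1, 1]], dtype='int32').astype('bool')
    # x is [[True, False], [True, True]]
    print(paddle.all(x))           # [False]
    print(paddle.all(x, axis=0))   # [True, False]
    print(paddle.all(x, axis=-1))  # [False, True]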
...@@ -2107,7 +2090,7 @@ def any(x, axis=None, keepdim=False, name=None): ...@@ -2107,7 +2090,7 @@ def any(x, axis=None, keepdim=False, name=None):
x (Tensor): An N-D Tensor, the input data type should be `bool`. x (Tensor): An N-D Tensor, the input data type should be `bool`.
axis (int|list|tuple, optional): The dimensions along which the ``logical or`` is computed. If axis (int|list|tuple, optional): The dimensions along which the ``logical or`` is computed. If
:attr:`None`, compute the ``logical or`` over all elements of :attr:`x` and return a :attr:`None`, compute the ``logical or`` over all elements of :attr:`x` and return a
Tensor variable with a single element, otherwise must be in the Tensor with a single element, otherwise must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`, range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`. the dimension to reduce is :math:`rank + axis[i]`.
keepdim (bool, optional): Whether to keep the reduced dimension in the keepdim (bool, optional): Whether to keep the reduced dimension in the
...@@ -2132,10 +2115,7 @@ def any(x, axis=None, keepdim=False, name=None): ...@@ -2132,10 +2115,7 @@ def any(x, axis=None, keepdim=False, name=None):
import paddle.fluid.layers as layers import paddle.fluid.layers as layers
import numpy as np import numpy as np
# set as static mode # x is a bool Tensor with following elements:
paddle.disable_static()
# x is a bool Tensor variable with following elements:
# [[True, False] # [[True, False]
# [False, False]] # [False, False]]
x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32')) x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
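As with `all` above, a minimal paddle-2.x style sketch of `any` (illustrative only, matching the boolean values in the comment rather than the assigned array):

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1, 0], [0, 0]], dtype='int32').astype('bool')
    # x is [[True, False], [False, False]]
    print(paddle.any(x))           # [True]
    print(paddle.any(x, axis=0))   # [True, False]
    print(paddle.any(x, axis=-1))  # [True, False]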
......
...@@ -39,9 +39,6 @@ from paddle.common_ops_import import * ...@@ -39,9 +39,6 @@ from paddle.common_ops_import import *
def argsort(x, axis=-1, descending=False, name=None): def argsort(x, axis=-1, descending=False, name=None):
""" """
:alias_main: paddle.argsort
:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
This OP sorts the input along the given axis, and returns the corresponding index tensor for the sorted output values. The default sort order is ascending; to sort in descending order, set :attr:`descending` to True. This OP sorts the input along the given axis, and returns the corresponding index tensor for the sorted output values. The default sort order is ascending; to sort in descending order, set :attr:`descending` to True.
Args: Args:
...@@ -67,7 +64,6 @@ def argsort(x, axis=-1, descending=False, name=None): ...@@ -67,7 +64,6 @@ def argsort(x, axis=-1, descending=False, name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([[[5,8,9,5], x = paddle.to_tensor([[[5,8,9,5],
[0,0,1,7], [0,0,1,7],
[6,9,2,4]], [6,9,2,4]],
...@@ -78,21 +74,21 @@ def argsort(x, axis=-1, descending=False, name=None): ...@@ -78,21 +74,21 @@ def argsort(x, axis=-1, descending=False, name=None):
out1 = paddle.argsort(x=x, axis=-1) out1 = paddle.argsort(x=x, axis=-1)
out2 = paddle.argsort(x=x, axis=0) out2 = paddle.argsort(x=x, axis=0)
out3 = paddle.argsort(x=x, axis=1) out3 = paddle.argsort(x=x, axis=1)
print(out1.numpy()) print(out1)
#[[[0 3 1 2] #[[[0 3 1 2]
# [0 1 2 3] # [0 1 2 3]
# [2 3 0 1]] # [2 3 0 1]]
# [[1 3 2 0] # [[1 3 2 0]
# [0 1 2 3] # [0 1 2 3]
# [2 0 3 1]]] # [2 0 3 1]]]
print(out2.numpy()) print(out2)
#[[[0 1 1 1] #[[[0 1 1 1]
# [0 0 0 0] # [0 0 0 0]
# [1 1 1 0]] # [1 1 1 0]]
# [[1 0 0 0] # [[1 0 0 0]
# [1 1 1 1] # [1 1 1 1]
# [0 0 0 1]]] # [0 0 0 1]]]
print(out3.numpy()) print(out3)
#[[[1 1 1 2] #[[[1 1 1 2]
# [0 0 2 0] # [0 0 2 0]
# [2 2 0 1]] # [2 2 0 1]]
...@@ -149,17 +145,16 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None): ...@@ -149,17 +145,16 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([[5,8,9,5], x = paddle.to_tensor([[5,8,9,5],
[0,0,1,7], [0,0,1,7],
[6,9,2,4]]) [6,9,2,4]])
out1 = paddle.argmax(x) out1 = paddle.argmax(x)
print(out1.numpy()) # 2 print(out1) # 2
out2 = paddle.argmax(x, axis=1) out2 = paddle.argmax(x, axis=1)
print(out2.numpy()) print(out2)
# [2 3 1] # [2 3 1]
out3 = paddle.argmax(x, axis=-1) out3 = paddle.argmax(x, axis=-1)
print(out3.numpy()) print(out3)
# [2 3 1] # [2 3 1]
""" """
if axis is not None and not isinstance(axis, int): if axis is not None and not isinstance(axis, int):
...@@ -227,17 +222,16 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None): ...@@ -227,17 +222,16 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([[5,8,9,5], x = paddle.to_tensor([[5,8,9,5],
[0,0,1,7], [0,0,1,7],
[6,9,2,4]]) [6,9,2,4]])
out1 = paddle.argmin(x) out1 = paddle.argmin(x)
print(out1.numpy()) # 4 print(out1) # 4
out2 = paddle.argmin(x, axis=1) out2 = paddle.argmin(x, axis=1)
print(out2.numpy()) print(out2)
# [0 0 2] # [0 0 2]
out3 = paddle.argmin(x, axis=-1) out3 = paddle.argmin(x, axis=-1)
print(out3.numpy()) print(out3)
# [0 0 2] # [0 0 2]
""" """
if axis is not None and not isinstance(axis, int): if axis is not None and not isinstance(axis, int):
...@@ -357,22 +351,20 @@ def nonzero(x, as_tuple=False): ...@@ -357,22 +351,20 @@ def nonzero(x, as_tuple=False):
.. code-block:: python .. code-block:: python
import paddle import paddle
x1 = paddle.to_tensor([[1.0, 0.0, 0.0], x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0], [0.0, 2.0, 0.0],
[0.0, 0.0, 3.0]]) [0.0, 0.0, 3.0]])
x2 = paddle.to_tensor([0.0, 1.0, 0.0, 3.0]) x2 = paddle.to_tensor([0.0, 1.0, 0.0, 3.0])
x3 = paddle.to_tensor([0.0, 0.0, 0.0])
out_z1 = paddle.nonzero(x1) out_z1 = paddle.nonzero(x1)
print(out_z1.numpy()) print(out_z1)
#[[0 0] #[[0 0]
# [1 1] # [1 1]
# [2 2]] # [2 2]]
out_z1_tuple = paddle.nonzero(x1, as_tuple=True) out_z1_tuple = paddle.nonzero(x1, as_tuple=True)
for out in out_z1_tuple: for out in out_z1_tuple:
print(out.numpy()) print(out)
#[[0] #[[0]
# [1] # [1]
# [2]] # [2]]
...@@ -380,21 +372,15 @@ def nonzero(x, as_tuple=False): ...@@ -380,21 +372,15 @@ def nonzero(x, as_tuple=False):
# [1] # [1]
# [2]] # [2]]
out_z2 = paddle.nonzero(x2) out_z2 = paddle.nonzero(x2)
print(out_z2.numpy()) print(out_z2)
#[[1] #[[1]
# [3]] # [3]]
out_z2_tuple = paddle.nonzero(x2, as_tuple=True) out_z2_tuple = paddle.nonzero(x2, as_tuple=True)
for out in out_z2_tuple: for out in out_z2_tuple:
print(out.numpy()) print(out)
#[[1] #[[1]
# [3]] # [3]]
out_z3 = paddle.nonzero(x3)
print(out_z3.numpy())
#[]
out_z3_tuple = paddle.nonzero(x3, as_tuple=True)
for out in out_z3_tuple:
print(out.numpy())
#[]
""" """
list_out = [] list_out = []
shape = x.shape shape = x.shape
...@@ -419,8 +405,6 @@ def nonzero(x, as_tuple=False): ...@@ -419,8 +405,6 @@ def nonzero(x, as_tuple=False):
def sort(x, axis=-1, descending=False, name=None): def sort(x, axis=-1, descending=False, name=None):
""" """
:alias_main: paddle.sort
:alias: paddle.sort,paddle.tensor.sort,paddle.tensor.search.sort
This OP sorts the input along the given axis, and returns the sorted output tensor. The default sort order is ascending; to sort in descending order, set :attr:`descending` to True. This OP sorts the input along the given axis, and returns the sorted output tensor. The default sort order is ascending; to sort in descending order, set :attr:`descending` to True.
...@@ -439,10 +423,11 @@ def sort(x, axis=-1, descending=False, name=None): ...@@ -439,10 +423,11 @@ def sort(x, axis=-1, descending=False, name=None):
Returns: Returns:
Tensor: sorted tensor(with the same shape and data type as ``x``). Tensor: sorted tensor(with the same shape and data type as ``x``).
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.disable_static()
x = paddle.to_tensor([[[5,8,9,5], x = paddle.to_tensor([[[5,8,9,5],
[0,0,1,7], [0,0,1,7],
[6,9,2,4]], [6,9,2,4]],
...@@ -453,21 +438,21 @@ def sort(x, axis=-1, descending=False, name=None): ...@@ -453,21 +438,21 @@ def sort(x, axis=-1, descending=False, name=None):
out1 = paddle.sort(x=x, axis=-1) out1 = paddle.sort(x=x, axis=-1)
out2 = paddle.sort(x=x, axis=0) out2 = paddle.sort(x=x, axis=0)
out3 = paddle.sort(x=x, axis=1) out3 = paddle.sort(x=x, axis=1)
print(out1.numpy()) print(out1)
#[[[5. 5. 8. 9.] #[[[5. 5. 8. 9.]
# [0. 0. 1. 7.] # [0. 0. 1. 7.]
# [2. 4. 6. 9.]] # [2. 4. 6. 9.]]
# [[2. 2. 4. 5.] # [[2. 2. 4. 5.]
# [4. 7. 7. 9.] # [4. 7. 7. 9.]
# [0. 1. 6. 7.]]] # [0. 1. 6. 7.]]]
print(out2.numpy()) print(out2)
#[[[5. 2. 4. 2.] #[[[5. 2. 4. 2.]
# [0. 0. 1. 7.] # [0. 0. 1. 7.]
# [1. 7. 0. 4.]] # [1. 7. 0. 4.]]
# [[5. 8. 9. 5.] # [[5. 8. 9. 5.]
# [4. 7. 7. 9.] # [4. 7. 7. 9.]
# [6. 9. 2. 6.]]] # [6. 9. 2. 6.]]]
print(out3.numpy()) print(out3)
#[[[0. 0. 1. 4.] #[[[0. 0. 1. 4.]
# [5. 8. 2. 5.] # [5. 8. 2. 5.]
# [6. 9. 9. 7.]] # [6. 9. 9. 7.]]
...@@ -610,7 +595,7 @@ def index_sample(x, index): ...@@ -610,7 +595,7 @@ def index_sample(x, index):
[500, 600, 700, 800], [500, 600, 700, 800],
[900, 1000, 1100, 1200]], dtype='int32') [900, 1000, 1100, 1200]], dtype='int32')
out_z1 = paddle.index_sample(x, index) out_z1 = paddle.index_sample(x, index)
print(out_z1.numpy()) print(out_z1)
#[[1. 2. 3.] #[[1. 2. 3.]
# [6. 7. 8.] # [6. 7. 8.]
# [9. 9. 9.]] # [9. 9. 9.]]
...@@ -619,17 +604,17 @@ def index_sample(x, index): ...@@ -619,17 +604,17 @@ def index_sample(x, index):
# get the value of the element of the corresponding index in other tensors # get the value of the element of the corresponding index in other tensors
top_value, top_index = paddle.topk(x, k=2) top_value, top_index = paddle.topk(x, k=2)
out_z2 = paddle.index_sample(target, top_index) out_z2 = paddle.index_sample(target, top_index)
print(top_value.numpy()) print(top_value)
#[[ 4. 3.] #[[ 4. 3.]
# [ 8. 7.] # [ 8. 7.]
# [12. 11.]] # [12. 11.]]
print(top_index.numpy()) print(top_index)
#[[3 2] #[[3 2]
# [3 2] # [3 2]
# [3 2]] # [3 2]]
print(out_z2.numpy()) print(out_z2)
#[[ 400 300] #[[ 400 300]
# [ 800 700] # [ 800 700]
# [1200 1100]] # [1200 1100]]
...@@ -673,7 +658,6 @@ def masked_select(x, mask, name=None): ...@@ -673,7 +658,6 @@ def masked_select(x, mask, name=None):
import paddle import paddle
x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0], x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]]) [9.0, 10.0, 11.0, 12.0]])
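The masked_select example is cut off after defining ``x``; a hedged continuation (the mask values here are illustrative, not taken from the source):

.. code-block:: python

    mask = paddle.to_tensor([[True, False, False, False],
                             [True, True, False, False],
                             [True, False, False, False]])
    out = paddle.masked_select(x, mask)  # gathers elements of x where mask is True
    print(out)  # [1., 5., 6., 9.]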
...@@ -726,33 +710,31 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None): ...@@ -726,33 +710,31 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
import paddle import paddle
paddle.disable_static()
tensor_1 = paddle.to_tensor([1, 4, 5, 7]) tensor_1 = paddle.to_tensor([1, 4, 5, 7])
value_1, indices_1 = paddle.topk(tensor_1, k=1) value_1, indices_1 = paddle.topk(tensor_1, k=1)
print(value_1.numpy()) print(value_1)
# [7] # [7]
print(indices_1.numpy()) print(indices_1)
# [3] # [3]
tensor_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]]) tensor_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
value_2, indices_2 = paddle.topk(tensor_2, k=1) value_2, indices_2 = paddle.topk(tensor_2, k=1)
print(value_2.numpy()) print(value_2)
# [[7] # [[7]
# [6]] # [6]]
print(indices_2.numpy()) print(indices_2)
# [[3] # [[3]
# [1]] # [1]]
value_3, indices_3 = paddle.topk(tensor_2, k=1, axis=-1) value_3, indices_3 = paddle.topk(tensor_2, k=1, axis=-1)
print(value_3.numpy()) print(value_3)
# [[7] # [[7]
# [6]] # [6]]
print(indices_3.numpy()) print(indices_3)
# [[3] # [[3]
# [1]] # [1]]
value_4, indices_4 = paddle.topk(tensor_2, k=1, axis=0) value_4, indices_4 = paddle.topk(tensor_2, k=1, axis=0)
print(value_4.numpy()) print(value_4)
# [[2 6 5 7]] # [[2 6 5 7]]
print(indices_4.numpy()) print(indices_4)
# [[1 1 0 0]] # [[1 1 0 0]]
""" """
......