Unverified commit da71173b, authored by Noel, committed by GitHub

Fix ops doc for some ops

Parent 770395cb
......@@ -203,7 +203,7 @@ $$out = x - \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$
UNUSED constexpr char SqrtDoc[] = R"DOC(
Sqrt Activation Operator.
.. math:: out=\\sqrt{x}=x^{1/2}
$$out=\\sqrt{x}=x^{1/2}$$
**Note**:
input value must be greater than or equal to zero.
......@@ -229,14 +229,14 @@ $$out = |x|$$
UNUSED constexpr char CeilDoc[] = R"DOC(
Ceil Operator. Computes ceil of x element-wise.
$$out = \\left \\lceil x \\right \\rceil$$
$$out = \\lceil x \\rceil$$
)DOC";
UNUSED constexpr char FloorDoc[] = R"DOC(
Floor Activation Operator. Computes floor of x element-wise.
$$out = \\left \\lfloor x \\right \\rfloor$$
$$out = \\lfloor x \\rfloor$$
)DOC";
......@@ -273,7 +273,7 @@ $$out = cosh(x)$$
UNUSED constexpr char RoundDoc[] = R"DOC(
The OP rounds the values in the input to the nearest integer value.
.. code-block:: python
.. code-block:: text
input:
x.shape = [4]
......@@ -592,7 +592,7 @@ class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
void Make() override {
AddInput("X",
"Input of STanh operator."
" A LoDTensor or Tensor with type float32, float64.");
" A Tensor with type float32, float64.");
AddOutput("Out", "Output of STanh operator. A Tensor with type float32.");
AddAttr<float>("scale_a", "The scale parameter of a for the input. ")
.SetDefault(0.67f);
......
......@@ -82,7 +82,7 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"The 1st input of cos_sim op, LoDTensor with shape ``[N_1, N_2, "
"The 1st input of cos_sim op, Tensor with shape ``[N_1, N_2, "
"..., N_k]``, the data type is float32.");
AddInput("Y",
"The 2nd input of cos_sim op, Tensor with shape ``[N_1 or 1, N_2, "
......@@ -110,9 +110,6 @@ of input Y could be just 1 (different from input X), which will be
broadcasted to match the shape of input X before computing their cosine
similarity.
Both the input X and Y can carry the LoD (Level of Details) information,
or not. But the output only shares the LoD information with input X.
)DOC");
}
};
......
......@@ -40,15 +40,11 @@ class ElementwiseMaxOpMaker : public ElementwiseOpMaker {
std::string GetEquation() const override { return "Out = max(X, Y)"; }
void AddInputX() override {
AddInput(
"X",
"(Variable), The first tensor holding the elements to be compared.");
AddInput("X", "The first tensor holding the elements to be compared.");
}
void AddInputY() override {
AddInput(
"Y",
"(Variable), The second tensor holding the elements to be compared.");
AddInput("Y", "The second tensor holding the elements to be compared.");
}
std::string GetOpFuntionality() const override {
......
......@@ -40,15 +40,11 @@ class ElementwiseMinOpMaker : public ElementwiseOpMaker {
std::string GetEquation() const override { return "Out = min(X, Y)"; }
void AddInputX() override {
AddInput(
"X",
"(Variable), The first tensor holding the elements to be compared.");
AddInput("X", "The first tensor holding the elements to be compared.");
}
void AddInputY() override {
AddInput(
"Y",
"(Variable), The second tensor holding the elements to be compared.");
AddInput("Y", "The second tensor holding the elements to be compared.");
}
std::string GetOpFuntionality() const override {
......
......@@ -1583,19 +1583,16 @@ def create_array(dtype):
@templatedoc()
def less_than(x, y, force_cpu=None, cond=None, name=None):
"""
:alias_main: paddle.less_than
:alias: paddle.less_than,paddle.tensor.less_than,paddle.tensor.logic.less_than
:old_api: paddle.fluid.layers.less_than
${comment}
Args:
x(${x_type}): ${x_comment}.
y(${y_type}): ${y_comment}.
x(Tensor): ${x_comment}.
y(Tensor): ${y_comment}.
force_cpu(${force_cpu_type}): ${force_cpu_comment}.
cond(Variable, optional): Optional output which can be any created Variable
cond(Tensor, optional): Optional output which can be any created Tensor
that meets the requirements to store the result of *less_than*.
if cond is None, a new Varibale will be created to store the result.
if cond is None, a new Tensor will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
......@@ -1604,25 +1601,13 @@ def less_than(x, y, force_cpu=None, cond=None, name=None):
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph Organizing
x = fluid.layers.data(name='x', shape=[2], dtype='float64')
y = fluid.layers.data(name='y', shape=[2], dtype='float64')
result = fluid.layers.less_than(x=x, y=y)
# The comment lists another available method.
# result = fluid.layers.fill_constant(shape=[2], dtype='float64', value=0)
# fluid.layers.less_than(x=x, y=y, cond=result)
# Create an executor using CPU as example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 2], [3, 4]]).astype(np.float64)
y_i = np.array([[2, 2], [1, 3]]).astype(np.float64)
result_value, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[result])
print(result_value) # [[True, False], [False, False]]
import paddle
x = paddle.to_tensor([1, 2, 3, 4], dtype='float32')
y = paddle.to_tensor([2, 2, 1, 3], dtype='float32')
result = paddle.less_than(x, y)
print(result) # [True, False, False, False]
"""
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"less_than")
......
......@@ -55,10 +55,11 @@ _two_bang_pattern_ = re.compile(r"!!([^!]+)!!")
def escape_math(text):
return _two_bang_pattern_.sub(
r'$$\1$$',
_single_dollar_pattern_.sub(r':math:`\1`',
_two_dollar_pattern_.sub(r"!!\1!!", text)))
#return _two_bang_pattern_.sub(
# r'$$\1$$',
# _single_dollar_pattern_.sub(r':math:\n`\1`',
# _two_dollar_pattern_.sub(r"!!\1!!", text)))
return _two_dollar_pattern_.sub(r':math:`\1`', text)
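A small self-contained sketch of what the new escape_math does, assuming _two_dollar_pattern_ matches ``$$...$$`` spans (its definition sits outside this hunk, so the pattern below is a hypothetical stand-in):

.. code-block:: python

import re

# Hypothetical stand-in for the pattern defined elsewhere in the file.
_two_dollar_pattern_ = re.compile(r"\$\$([^$]+)\$\$")

text = "Computes $$out = \\sqrt{x}$$ element-wise."
print(_two_dollar_pattern_.sub(r':math:`\1`', text))
# Computes :math:`out = \sqrt{x}` element-wise.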
def _generate_doc_string_(op_proto,
......
......@@ -377,9 +377,7 @@ def edit_distance(input,
So the edit distance between A and B is 3.
The input is a LoDTensor or Tensor.
If it is a LoDTensor, The separation is specified by the LoD information.
If it is a Tensor, The input_length and label_length should be supported.
The input is a Tensor; input_length and label_length should be provided.
The `batch_size` of labels should be same as `input`.
......@@ -388,59 +386,36 @@ def edit_distance(input,
the edit distance value will be divided by the length of label.
Parameters:
input(Variable): The input variable which is a tensor or LoDTensor, its rank should be equal to 2 and its data type should be int64.
label(Variable): The label variable which is a tensor or LoDTensor, its rank should be equal to 2 and its data type should be int64.
input(Tensor): The input tensor, its rank should be equal to 2 and its data type should be int64.
label(Tensor): The label tensor, its rank should be equal to 2 and its data type should be int64.
normalized(bool, default True): Indicated whether to normalize the edit distance.
ignored_tokens(list<int>, default None): Tokens that will be removed before
calculating edit distance.
input_length(Variable): The length for each sequence in `input` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
label_length(Variable): The length for each sequence in `label` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
input_length(Tensor): The length for each sequence in `input` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
label_length(Tensor): The length for each sequence in `label` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
NOTE: To avoid unexpected results, the value of every element in input_length and label_length should be equal to the second dimension of input and label respectively. For example, given input [[1,2,3,4],[5,6,7,8],[9,10,11,12]] with shape [3,4], input_length should be [4,4,4].
NOTE: This API is different from fluid.metrics.EditDistance
Returns:
Tuple:
distance(Variable): edit distance result, its data type is float32, and its shape is (batch_size, 1).
sequence_num(Variable): sequence number, its data type is float32, and its shape is (1,).
distance(Tensor): edit distance result, its data type is float32, and its shape is (batch_size, 1).
sequence_num(Tensor): sequence number, its data type is float32, and its shape is (1,).
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# using LoDTensor
x_lod = fluid.data(name='x_lod', shape=[None,1], dtype='int64', lod_level=1)
y_lod = fluid.data(name='y_lod', shape=[None,1], dtype='int64', lod_level=1)
distance_lod, seq_num_lod = fluid.layers.edit_distance(input=x_lod, label=y_lod)
# using Tensor
input_data = np.array([[1,2,3],[4,5,6],[4,4,4],[1,1,1]]).astype('int64')
label_data = np.array([[1,3,4,1],[4,5,8,1],[7,7,7,1],[1,1,1,1]]).astype('int64')
input_len = np.array([3,3,3,3]).astype('int64')
label_len = np.array([4,4,4,4]).astype('int64')
input_t = fluid.data(name='input', shape=[None,3], dtype='int64')
label_t = fluid.data(name='label', shape=[None,4], dtype='int64')
input_len_t = fluid.data(name='input_length', shape=[None], dtype='int64')
label_len_t = fluid.data(name='label_length', shape=[None], dtype='int64')
import paddle
import paddle.nn.functional as F
distance, sequence_num = fluid.layers.edit_distance(input=input_t, label=label_t, input_length=input_len_t, label_length=label_len_t,normalized=False)
input = paddle.to_tensor([[1,2,3],[4,5,6],[4,4,4],[1,1,1]], dtype='int64')
label = paddle.to_tensor([[1,3,4,1],[4,5,8,1],[7,7,7,1],[1,1,1,1]], dtype='int64')
input_len = paddle.to_tensor([3,3,3,3], dtype='int64')
label_len = paddle.to_tensor([4,4,4,4], dtype='int64')
# print(input_data.shape, label_data.shape)
# ((4,3), (4,4))
distance, sequence_num = F.loss.edit_distance(input=input, label=label, input_length=input_len, label_length=label_len, normalized=False)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
dis, seq_num = exe.run(fluid.default_main_program(),
feed={"input":input_data,
"label":label_data,
"input_length": input_len,
"label_length": label_len},
fetch_list=[distance,sequence_num])
# print(dis)
# print(distance)
# [[3.]
# [2.]
# [4.]
......@@ -451,7 +426,7 @@ def edit_distance(input,
# [1. ]
# [0.25]
#
# print(seq_num)
# print(sequence_num)
# [4]
"""
......@@ -1434,18 +1409,15 @@ def sigmoid_cross_entropy_with_logits(x,
name=None,
normalize=False):
"""
:alias_main: paddle.nn.functional.sigmoid_cross_entropy_with_logits
:alias: paddle.nn.functional.sigmoid_cross_entropy_with_logits,paddle.nn.functional.loss.sigmoid_cross_entropy_with_logits
:old_api: paddle.fluid.layers.sigmoid_cross_entropy_with_logits
${comment}
Args:
x(Variable): a 2-D tensor with shape N x D, where N is the batch size and
x(Tensor): a 2-D tensor with shape N x D, where N is the batch size and
D is the number of classes. This input is a tensor of logits computed
by the previous operator. Logits are unscaled log probabilities given
as log(p/(1-p)) The data type should be float32 or float64.
label (Variable): a 2-D tensor of the same type and shape as X.
label (Tensor): a 2-D tensor of the same type and shape as X.
This input is a tensor of probabilistic labels for each logit.
ignore_index(int): Specifies a target value that is ignored and
does not contribute to the input gradient.
......@@ -1456,22 +1428,19 @@ def sigmoid_cross_entropy_with_logits(x,
targets != ignore_index.
Returns:
out(${out_type}): ${out_comment}
out(Tensor): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name='data', shape=[10], dtype='float32')
label = fluid.data(
name='data', shape=[10], dtype='float32')
loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=input,
label=label,
ignore_index=-1,
normalize=True) # or False
# loss = fluid.layers.reduce_sum(loss) # summation of loss
import paddle
input = paddle.rand(shape=[10], dtype='float32')
label = paddle.rand(shape=[10], dtype='float32')
loss = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(input, label,
ignore_index=-1, normalize=True)
print(loss)
"""
check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
'sigmoid_cross_entropy_with_logits')
......@@ -1619,47 +1588,44 @@ def huber_loss(input, label, delta):
@templatedoc()
def kldiv_loss(x, target, reduction='mean', name=None):
"""
:alias_main: paddle.nn.functional.kldiv_loss
:alias: paddle.nn.functional.kldiv_loss,paddle.nn.functional.loss.kldiv_loss
:old_api: paddle.fluid.layers.kldiv_loss
${comment}
Args:
x (Variable): ${x_comment}
target (Variable): ${target_comment}
reduction (Variable): ${reduction_comment}
x (Tensor): ${x_comment}
target (Tensor): ${target_comment}
reduction (Tensor): ${reduction_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable(Tensor): The KL divergence loss. The data type is same as input tensor
Tensor: The KL divergence loss. The data type is same as input tensor
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
# 'batchmean' reduction, loss shape will be [N]
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean') # shape=[-1]
x = paddle.rand(shape=[3,4,2,2], dtype='float32')
target = paddle.rand(shape=[3,4,2,2], dtype='float32')
# 'batchmean' reduction, loss shape will be [1]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean')
print(loss.shape) # shape=[1]
# 'mean' reduction, loss shape will be [1]
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean') # shape=[1]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean')
print(loss.shape) # shape=[1]
# 'sum' reduction, loss shape will be [1]
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum') # shape=[1]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum')
print(loss.shape) # shape=[1]
# 'none' reduction, loss shape is same with X shape
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none') # shape=[-1, 4, 2, 2]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none')
print(loss.shape) # shape=[3, 4, 2, 2]
"""
helper = LayerHelper('kldiv_loss', **locals())
......
......@@ -912,19 +912,22 @@ def cos_sim(X, Y):
${comment}
Args:
X (Variable): ${x_comment}.
Y (Variable): ${y_comment}.
X (Tensor): ${x_comment}.
Y (Tensor): ${y_comment}.
Returns:
A Variable holding LoDTensor representing the output of cosine(X, Y).
A Tensor representing the output of cosine(X, Y).
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 7], dtype='float32')
y = fluid.data(name='y', shape=[1, 7], dtype='float32')
out = fluid.layers.cos_sim(x, y)
import paddle
x = paddle.rand(shape=[3, 7], dtype='float32')
y = paddle.rand(shape=[1, 7], dtype='float32')
out = paddle.fluid.layers.cos_sim(x, y)
print(out)
"""
check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim')
check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim')
......@@ -1116,12 +1119,11 @@ def chunk_eval(input,
type correctly.
Args:
input (Variable): A Tensor or LoDTensor, representing the predicted labels
from the network. When it is a Tensor, its shape would be `[N, M, 1]`,
where `N` stands for batch size, `M` for sequence length; When it is
a LoDTensor, its shape would be `[N, 1]` where `N` stands for the total
sequence lengths in this mini-batch. The data type should be int64.
label (Variable): A Tensor or LoDTensor representing the ground-truth labels.
input (Tensor): A Tensor representing the predicted labels
from the network. Its shape would be `[N, M, 1]`,
where `N` stands for batch size, `M` for sequence length.
The data type should be int64.
label (Tensor): A Tensor representing the ground-truth labels.
It should have the same shape, lod and data type as ``input`` .
chunk_scheme (str): Indicate the tagging schemes used here. The value must
be IOB, IOE, IOBES or plain.
......@@ -1129,9 +1131,8 @@ def chunk_eval(input,
excluded_chunk_types (list, optional): Indicate the chunk types shouldn't
be taken into account. It should be a list of chunk type ids(integer).
Default None.
seq_length(Variable, optional): A 1D Tensor containing the length of each
sequence when ``input`` and ``label`` are Tensor. It needn't be
provided if ``input`` and ``label`` are LoDTensor. Default None.
seq_length(Tensor, optional): A 1D Tensor containing the length of each
sequence when ``input`` and ``label`` are Tensor. Default None.
Returns:
tuple: A tuple including precision, recall, F1-score, chunk number detected, \
......@@ -1230,7 +1231,7 @@ def softmax(input, use_cudnn=True, name=None, axis=-1):
.. math::
Out[i, j] = \\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}
Out[i, j] = \\frac{\\exp(X[i, j])}{\\sum_j(\\exp(X[i, j]))}
Example:
......@@ -1280,7 +1281,7 @@ def softmax(input, use_cudnn=True, name=None, axis=-1):
[0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
Args:
input (Variable): The input variable. A multi-dimension ``Tensor`` with type float32 or float64.
input (Tensor): The input tensor. A multi-dimension ``Tensor`` with type float32 or float64.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn \
library is installed. To improve numerical stability, set use_cudnn to \
False by default.
......@@ -1288,27 +1289,33 @@ def softmax(input, use_cudnn=True, name=None, axis=-1):
will be named automatically. Default: None.
axis (int, optional): The index of dimension to perform softmax calculations, it should
be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of
input variable. Default: -1. -1 means the last dimension.
input tensor. Default: -1. -1 means the last dimension.
Returns:
Variable: ``Tensor`` indicates the output of softmax. The data type and shape are the same as ``input`` .
Tensor: ``Tensor`` indicates the output of softmax. The data type and shape are the same as ``input`` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]], dtype='float32')
y = F.softmax(x, axis=1)
print(y)
# [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
# [0.01786798, 0.01786798, 0.04661262, 0.04661262],
# [0.97555870, 0.97555870, 0.93623954, 0.93623954]],
# [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
# [0.26762316, 0.26762316, 0.26762316, 0.26762316],
# [0.72747517, 0.72747517, 0.72747517, 0.72747517]]]
data = fluid.data(name="input", shape=[-1, 3],dtype="float32")
result = fluid.layers.softmax(data,axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3, 3).astype("float32")
output= exe.run(feed={"input": x},
fetch_list=[result[0]])
print(output)
"""
if in_dygraph_mode():
......@@ -9539,9 +9546,6 @@ def pow(x, factor=1.0, name=None):
@templatedoc()
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
"""
:alias_main: paddle.stanh
:alias: paddle.stanh,paddle.tensor.stanh,paddle.tensor.math.stanh
:old_api: paddle.fluid.layers.stanh
${comment}
Args:
......@@ -9552,27 +9556,24 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
will be named automatically.
Returns:
output(${out_type}): ${out_comment}.
output(Tensor): ${out_comment}.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.data(name="input", shape=[-1, 3])
result = fluid.layers.stanh(data,scale_a=0.67, scale_b=1.72)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.random(size=(3, 3)).astype('float32')
output= exe.run(feed={"input": x},
fetch_list=[result])
print(output)
import paddle
#[array([[0.626466 , 0.89842904, 0.7501062 ],
# [0.25147712, 0.7484996 , 0.22902708],
# [0.62705994, 0.23110689, 0.56902856]], dtype=float32)]
data = paddle.rand(shape=[3, 3], dtype='float32')
output = paddle.stanh(data, scale_a=0.67, scale_b=1.72)
print(data)
# [[0.19412413, 0.66871136, 0.77059180],
# [0.89738929, 0.35827777, 0.60592669],
# [0.66346580, 0.78424633, 0.46533889]]
print(output)
# [[0.22245567, 0.72288811, 0.81671900],
# [0.92525512, 0.40512756, 0.66227961],
# [0.71790355, 0.82885355, 0.51953089]]
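# A hedged check, assuming stanh(x) = scale_b * tanh(scale_a * x):
check = 1.72 * paddle.tanh(0.67 * data)
print(check)  # expected to match `output` above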
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')
......@@ -9857,20 +9858,12 @@ def leaky_relu(x, alpha=0.02, name=None):
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
# Graph Organizing
x = fluid.layers.data(name="x", shape=[2], dtype="float32")
res = fluid.layers.leaky_relu(x, alpha=0.1)
x = paddle.to_tensor([[-1, 2], [3, -4]], dtype='float32')
y = paddle.fluid.layers.leaky_relu(x, alpha=0.1)
print(y) # [[-0.1, 2], [3, -0.4]]
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[-1, 2], [3, -4]]).astype(np.float32)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[-0.1, 2], [3, -0.4]]
"""
return paddle.nn.functional.leaky_relu(x, alpha, name)
......@@ -12172,11 +12165,10 @@ def logical_and(x, y, out=None, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([True])
y = paddle.to_tensor([True, False, True, False])
res = paddle.logical_and(x, y)
print(res.numpy()) # [True False True False]
print(res) # [True False True False]
"""
return _logical_op(
op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)
......@@ -12210,13 +12202,12 @@ def logical_or(x, y, out=None, name=None):
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([True, False], dtype=np.bool).reshape(2, 1)
y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
res = paddle.logical_or(x, y)
print(res.numpy()) # [[ True True] [ True False]]
print(res) # [[ True True] [ True False]]
"""
return _logical_op(
op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True)
......@@ -12250,13 +12241,12 @@ def logical_xor(x, y, out=None, name=None):
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([True, False], dtype=np.bool).reshape([2, 1])
y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2])
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
res = paddle.logical_xor(x, y)
print(res.numpy()) # [[False, True], [ True, False]]
print(res) # [[False, True], [ True, False]]
"""
return _logical_op(
op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True)
......@@ -12265,9 +12255,6 @@ def logical_xor(x, y, out=None, name=None):
@templatedoc()
def logical_not(x, out=None, name=None):
"""
:alias_main: paddle.logical_not
:alias: paddle.logical_not, paddle.tensor.logical_not, paddle.tensor.logic.logical_not
:old_api: paddle.fluid.layers.logical_not
``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``x`` and ``out`` are N-dim boolean ``Variable``.
Each element of ``out`` is calculated by
......@@ -12277,21 +12264,21 @@ def logical_not(x, out=None, name=None):
out = !x
Args:
x(${x_type}): ${x_comment}.
out(Variable): The ``Variable`` that specifies the output of the operator, which can be any ``Variable`` that has been created in the program. The default value is None, and a new ``Variable` will be created to save the output.
x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool.
out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
${out_type}: ${out_comment}
Tensor: ${out_comment}
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([True, False, True, False])
res = paddle.logical_not(x)
print(res.numpy()) # [False True False True]
print(res) # [False True False True]
"""
return _logical_op(
......
......@@ -91,11 +91,10 @@ Examples:
import paddle
import paddle.nn.functional as F
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.sigmoid(x)
print(out.numpy())
print(out)
# [0.40131234 0.450166 0.52497919 0.57444252]
""")
......@@ -106,11 +105,10 @@ Examples:
import paddle
import paddle.nn.functional as F
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.log_sigmoid(x)
print(out.numpy())
print(out)
# [-0.91301525 -0.79813887 -0.64439666 -0.55435524]
""")
......@@ -120,11 +118,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.exp(x)
print(out.numpy())
print(out)
# [0.67032005 0.81873075 1.10517092 1.34985881]
""")
......@@ -134,11 +131,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.tanh(x)
print(out.numpy())
print(out)
# [-0.37994896 -0.19737532 0.09966799 0.29131261]
""")
......@@ -148,11 +144,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.atan(x)
print(out.numpy())
print(out)
# [-0.38050638 -0.19739556 0.09966865 0.29145679]
""")
......@@ -164,8 +159,6 @@ Examples:
import paddle
import paddle.nn.functional as F
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
......@@ -176,11 +169,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
out = paddle.sqrt(x)
print(out.numpy())
print(out)
# [0.31622777 0.4472136 0.54772256 0.63245553]
""")
......@@ -202,11 +194,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.abs(x)
print(out.numpy())
print(out)
# [0.4 0.2 0.1 0.3]
""")
......@@ -216,11 +207,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.ceil(x)
print(out.numpy())
print(out)
# [-0. -0. 1. 1.]
""")
......@@ -230,11 +220,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.floor(x)
print(out.numpy())
print(out)
# [-1. -1. 0. 0.]
""")
......@@ -244,11 +233,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.cos(x)
print(out.numpy())
print(out)
# [0.92106099 0.98006658 0.99500417 0.95533649]
""")
......@@ -258,11 +246,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.acos(x)
print(out.numpy())
print(out)
# [1.98231317 1.77215425 1.47062891 1.26610367]
""")
......@@ -272,11 +259,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.sin(x)
print(out.numpy())
print(out)
# [-0.38941834 -0.19866933 0.09983342 0.29552021]
""")
......@@ -286,11 +272,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.asin(x)
print(out.numpy())
print(out)
# [-0.41151685 -0.20135792 0.10016742 0.30469265]
""")
......@@ -300,11 +285,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.cosh(x)
print(out.numpy())
print(out)
# [1.08107237 1.02006676 1.00500417 1.04533851]
""")
......@@ -314,11 +298,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.sinh(x)
print(out.numpy())
print(out)
# [-0.41075233 -0.201336 0.10016675 0.30452029]
""")
......@@ -328,11 +311,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])
out = paddle.round(x)
print(out.numpy())
print(out)
# [-1. -0. 1. 2.]
""")
......@@ -342,11 +324,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.reciprocal(x)
print(out.numpy())
print(out)
# [-2.5 -5. 10. 3.33333333]
""")
......@@ -356,11 +337,10 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.square(x)
print(out.numpy())
print(out)
# [0.16 0.04 0.01 0.09]
""")
......@@ -372,8 +352,6 @@ Examples:
import paddle
import paddle.nn.functional as F
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
......@@ -386,8 +364,6 @@ Examples:
import paddle
import paddle.nn.functional as F
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
......@@ -722,9 +698,8 @@ Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.erf(x)
print(out.numpy())
print(out)
# [-0.42839236 -0.22270259 0.11246292 0.32862676]
"""
......@@ -1271,28 +1271,26 @@ def has_nan(x):
def isfinite(x):
"""
:alias_main: paddle.isfinite
:alias: paddle.isfinite,paddle.tensor.isfinite,paddle.tensor.logic.isfinite
:old_api: paddle.fluid.layers.isfinite
Test whether x contains any infinity/NaN values. Returns True if all the elements are finite,
otherwise False.
Args:
x(variable): The Tensor/LoDTensor to be checked.
x(Tensor): The Tensor to be checked.
Returns:
Variable: The tensor variable storing the output, contains a bool value.
Tensor: The tensor storing the output, contains a bool value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
var = fluid.layers.data(name="data",
shape=(4, 6),
dtype="float32")
out = fluid.layers.isfinite(var)
import paddle
x = paddle.rand(shape=[4, 6], dtype='float32')
y = paddle.fluid.layers.isfinite(x)
print(y)
"""
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"isfinite")
......
......@@ -120,11 +120,10 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
import paddle
paddle.disable_static()
input = paddle.to_tensor([0.5, 0.6, 0.7], 'float32')
label = paddle.to_tensor([1.0, 0.0, 1.0], 'float32')
output = paddle.nn.functional.binary_cross_entropy(input, label)
print(output.numpy()) # [0.65537095]
print(output) # [0.65537095]
"""
if reduction not in ['sum', 'mean', 'none']:
......@@ -200,16 +199,16 @@ def binary_cross_entropy_with_logits(logit,
.. math::
Out = -Labels * \\log(\\sigma(Logit)) - (1 - Labels) * \\log(1 - \\sigma(Logit))
We know that :math:`\\sigma(Logit) = \\frac{1}{1 + \\e^{-Logit}}`. By substituting this we get:
We know that :math:`\\sigma(Logit) = \\frac{1}{1 + e^{-Logit}}`. By substituting this we get:
.. math::
Out = Logit - Logit * Labels + \\log(1 + \\e^{-Logit})
Out = Logit - Logit * Labels + \\log(1 + e^{-Logit})
For stability and to prevent overflow of :math:`\\e^{-Logit}` when Logit < 0,
For stability and to prevent overflow of :math:`e^{-Logit}` when Logit < 0,
we reformulate the loss as follows:
.. math::
Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + \\e^{-\|Logit\|})
Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + e^{-\|Logit\|})
Then, if ``weight`` or ``pos_weight`` is not None, this operator multiply the
weight tensor on the loss `Out`. The ``weight`` tensor will attach different
......@@ -254,11 +253,11 @@ def binary_cross_entropy_with_logits(logit,
.. code-block:: python
import paddle
paddle.disable_static()
logit = paddle.to_tensor([5.0, 1.0, 3.0])
label = paddle.to_tensor([1.0, 0.0, 1.0])
output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label)
print(output.numpy()) # [0.45618808]
print(output) # [0.45618808]
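# A hedged sanity check of the stable form documented above:
# max(Logit, 0) - Logit * Labels + log(1 + exp(-|Logit|)), then mean.
manual = paddle.maximum(logit, paddle.zeros_like(logit)) - logit * label + paddle.log(1 + paddle.exp(-paddle.abs(logit)))
print(paddle.mean(manual))  # ~0.45618808, matching the output above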
"""
if reduction not in ['sum', 'mean', 'none']:
......@@ -577,13 +576,12 @@ def margin_ranking_loss(input,
.. code-block:: python
import paddle
paddle.disable_static()
input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
loss = paddle.nn.functional.margin_ranking_loss(input, other, label)
print(loss.numpy()) # [0.75]
print(loss) # [0.75]
"""
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
......@@ -651,22 +649,22 @@ def l1_loss(input, label, reduction='mean', name=None):
If `reduction` set to ``'none'``, the loss is:
.. math::
Out = \lvert input - label\rvert
Out = \\lvert input - label \\rvert
If `reduction` set to ``'mean'``, the loss is:
.. math::
Out = MEAN(\lvert input - label\rvert)
Out = MEAN(\\lvert input - label \\rvert)
If `reduction` set to ``'sum'``, the loss is:
.. math::
Out = SUM(\lvert input - label\rvert)
Out = SUM(\\lvert input - label\\rvert)
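For instance, a minimal sketch of the three reductions (input values chosen only for illustration):

.. code-block:: python

import paddle
import paddle.nn.functional as F

input = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
label = paddle.to_tensor([[1.5, 2.0], [2.0, 5.0]])
print(F.l1_loss(input, label, reduction='none'))  # [[0.5, 0.], [1., 1.]]
print(F.l1_loss(input, label, reduction='mean'))  # [0.625]
print(F.l1_loss(input, label, reduction='sum'))   # [2.5]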
Parameters:
input (Tensor): The input tensor. The shapes is [N, *], where N is batch size and `*` means any number of additional dimensions. It's data type should be float32, float64, int32, int64.
label (Tensor): label. The shapes is [N, *], same shape as ``input`` . It's data type should be float32, float64, int32, int64.
input (Tensor): The input tensor. The shape is [N, `*`], where N is batch size and `*` means any number of additional dimensions. Its data type should be float32, float64, int32, int64.
label (Tensor): label. The shape is [N, `*`], same shape as ``input`` . Its data type should be float32, float64, int32, int64.
reduction (str, optional): Indicate the reduction to apply to the loss,
the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
If `reduction` is ``'none'``, the unreduced loss is returned;
......@@ -674,12 +672,15 @@ def l1_loss(input, label, reduction='mean', name=None):
If `reduction` is ``'sum'``, the reduced sum loss is returned.
Default is ``'mean'``.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the L1 Loss of Tensor ``input`` and ``label``.
If `reduction` is ``'none'``, the shape of output loss is [N, *], the same as ``input`` .
If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1].
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
......
......@@ -40,15 +40,15 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
.. math::
y = \frac{x}{ \max\left( \lvert \lvert x \rvert \rvert_p, epsilon\right) }
y = \\frac{x}{ \\max\\left( \\lvert \\lvert x \\rvert \\rvert_p, epsilon\\right) }
.. math::
\lvert \lvert x \rvert \rvert_p = \left(\sum_i {\lvert x_i\rvert^p} \right)^{1/p}
\\lvert \\lvert x \\rvert \\rvert_p = \\left( \\sum_i {\\lvert x_i \\rvert^p} \\right)^{1/p}
where, :math:`\sum_i{\lvert x_i\rvert^p}` is calculated along the ``axis`` dimension.
where, :math:`\\sum_i{\\lvert x_i \\rvert^p}` is calculated along the ``axis`` dimension.
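For example, a brief sketch of L2 normalization along ``axis=1`` (illustrative values):

.. code-block:: python

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[3.0, 4.0], [6.0, 8.0]])
print(F.normalize(x, p=2, axis=1))
# [[0.6, 0.8],
#  [0.6, 0.8]]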
Args:
Parameters:
x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64.
p (float|int, optional): The exponent value in the norm formulation. Default: 2
axis (int, optional): The axis on which to apply normalization. If `axis < 0`, the dimension to normalization is `x.ndim + axis`. -1 is the last dimension.
......
......@@ -838,9 +838,13 @@ class MarginRankingLoss(fluid.dygraph.Layer):
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Shape:
input: N-D Tensor, the shape is [N, *], N is batch size and `*` means any number of additional dimensions., available dtype is float32, float64.
input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64.
other: N-D Tensor, `other` have the same shape and dtype as `input`.
label: N-D Tensor, label have the same shape and dtype as `input`.
output: If :attr:`reduction` is ``'mean'`` or ``'sum'`` , the out shape is :math:`[1]`, otherwise the shape is the same as `input` .The same dtype as input tensor.
Returns:
......@@ -851,14 +855,13 @@ class MarginRankingLoss(fluid.dygraph.Layer):
.. code-block:: python
import paddle
paddle.disable_static()
input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32")
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
margin_rank_loss = paddle.nn.MarginRankingLoss()
loss = margin_rank_loss(input, other, label)
print(loss.numpy()) # [0.75]
print(loss) # [0.75]
"""
def __init__(self, margin=0.0, reduction='mean', name=None):
......
......@@ -54,9 +54,6 @@ __all__ = [
def equal_all(x, y, name=None):
"""
:alias_main: paddle.equal_all
:alias: paddle.equal_all,paddle.tensor.equal_all,paddle.tensor.logic.equal_all
This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise.
**NOTICE**: The output of this OP has no gradient.
......@@ -75,14 +72,13 @@ def equal_all(x, y, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 2, 3])
z = paddle.to_tensor([1, 4, 3])
result1 = paddle.equal_all(x, y)
print(result1.numpy()) # result1 = [True ]
print(result1) # result1 = [True ]
result2 = paddle.equal_all(x, z)
print(result2.numpy()) # result2 = [False ]
print(result2) # result2 = [False ]
"""
helper = LayerHelper("equal_all", **locals())
......@@ -122,8 +118,6 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([10000., 1e-07])
y = paddle.to_tensor([10000.1, 1e-08])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
......@@ -189,10 +183,9 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
@templatedoc()
def equal(x, y, name=None):
"""
:alias_main: paddle.equal
:alias: paddle.equal,paddle.tensor.equal,paddle.tensor.logic.equal
This layer returns the truth value of :math:`x == y` elementwise.
**NOTICE**: The output of this OP has no gradient.
Args:
......@@ -210,11 +203,10 @@ def equal(x, y, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.equal(x, y)
print(result1.numpy()) # result1 = [True False False]
print(result1) # result1 = [True False False]
"""
if in_dygraph_mode():
return core.ops.equal(x, y)
......@@ -236,10 +228,8 @@ def equal(x, y, name=None):
@templatedoc()
def greater_equal(x, y, name=None):
"""
:alias_main: paddle.greater_equal
:alias: paddle.greater_equal,paddle.tensor.greater_equal,paddle.tensor.logic.greater_equal
This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent to the overloaded operator `>=`.
**NOTICE**: The output of this OP has no gradient.
Args:
......@@ -252,13 +242,13 @@ def greater_equal(x, y, name=None):
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_equal(x, y)
print(result1.numpy()) # result1 = [True False True]
print(result1) # result1 = [True False True]
"""
if in_dygraph_mode():
return core.ops.greater_equal(x, y)
......@@ -282,10 +272,8 @@ def greater_equal(x, y, name=None):
@templatedoc()
def greater_than(x, y, name=None):
"""
:alias_main: paddle.greater_than
:alias: paddle.greater_than,paddle.tensor.greater_than,paddle.tensor.logic.greater_than
This OP returns the truth value of :math:`x > y` elementwise, which is equivalent to the overloaded operator `>`.
**NOTICE**: The output of this OP has no gradient.
Args:
......@@ -298,13 +286,13 @@ def greater_than(x, y, name=None):
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_than(x, y)
print(result1.numpy()) # result1 = [False False True]
print(result1) # result1 = [False False True]
"""
if in_dygraph_mode():
return core.ops.greater_than(x, y)
......@@ -328,10 +316,8 @@ def greater_than(x, y, name=None):
@templatedoc()
def less_equal(x, y, name=None):
"""
:alias_main: paddle.less_equal
:alias: paddle.less_equal,paddle.tensor.less_equal,paddle.tensor.logic.less_equal
This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent to the overloaded operator `<=`.
**NOTICE**: The output of this OP has no gradient.
Args:
......@@ -345,13 +331,13 @@ def less_equal(x, y, name=None):
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_equal(x, y)
print(result1.numpy()) # result1 = [True True False]
print(result1) # result1 = [True True False]
"""
if in_dygraph_mode():
return core.ops.less_equal(x, y)
......@@ -373,10 +359,8 @@ def less_equal(x, y, name=None):
@templatedoc()
def less_than(x, y, name=None):
"""
:alias_main: paddle.less_than
:alias: paddle.less_than,paddle.tensor.less_than,paddle.tensor.logic.less_than
This OP returns the truth value of :math:`x < y` elementwise, which is equivalent to the overloaded operator `<`.
**NOTICE**: The output of this OP has no gradient.
Args:
......@@ -390,13 +374,13 @@ def less_than(x, y, name=None):
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_than(x, y)
print(result1.numpy()) # result1 = [False True False]
print(result1) # result1 = [False True False]
"""
if in_dygraph_mode():
return core.ops.less_than(x, y)
......@@ -418,10 +402,8 @@ def less_than(x, y, name=None):
@templatedoc()
def not_equal(x, y, name=None):
"""
:alias_main: paddle.not_equal
:alias: paddle.not_equal,paddle.tensor.not_equal,paddle.tensor.logic.not_equal
This OP returns the truth value of :math:`x != y` elementwise, which is equivalent to the overloaded operator `!=`.
**NOTICE**: The output of this OP has no gradient.
Args:
......@@ -438,11 +420,10 @@ def not_equal(x, y, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.not_equal(x, y)
print(result1.numpy()) # result1 = [False True True]
print(result1) # result1 = [False True True]
"""
if in_dygraph_mode():
return core.ops.not_equal(x, y)
......
......@@ -166,8 +166,6 @@ def pow(x, y, name=None):
import paddle
paddle.disable_static()
# example 1: y is a float
x = paddle.to_tensor([1, 2, 3])
y = 2
......@@ -474,32 +472,30 @@ Examples:
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor([[1, 2], [3, 4]])
y = paddle.to_tensor([[5, 6], [7, 8]])
res = paddle.maximum(x, y)
print(res.numpy())
print(res)
#[[5. 6.]
# [7. 8.]]
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([1, 2])
res = paddle.maximum(x, y, axis=1)
print(res.numpy())
print(res)
#[[[1. 2. 3.]
# [2. 2. 3.]]]
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
res = paddle.maximum(x, y)
print(res.numpy())
print(res)
#[ 2. 4. nan]
x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, 4, 5], dtype='float32')
res = paddle.maximum(x, y)
print(res.numpy())
print(res)
#[ 5. 4. inf]
"""
op_type = 'elementwise_max'
......@@ -517,33 +513,31 @@ Examples:
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
y = paddle.to_tensor([[5, 6], [7, 8]], dtype='float32')
res = paddle.minimum(x, y)
print(res.numpy())
print(res)
#[[1. 2.]
# [3. 4.]]
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]], dtype='float32')
y = paddle.to_tensor([1, 2], dtype='float32')
res = paddle.minimum(x, y, axis=1)
print(res.numpy())
print(res)
#[[[1. 1. 1.]
# [2. 2. 2.]]]
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
res = paddle.minimum(x, y)
print(res.numpy())
print(res)
#[ 1. 3. nan]
x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, 4, 5], dtype='float32')
res = paddle.minimum(x, y)
print(res.numpy())
print(res)
#[1. 3. 5.]
"""
op_type = 'elementwise_min'
......@@ -584,7 +578,7 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
x (Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64.
axis (int|list|tuple, optional): The dimensions along which the sum is performed. If
:attr:`None`, sum all elements of :attr:`x` and return a
Tensor variable with a single element, otherwise must be in the
Tensor with a single element, otherwise must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`.
dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype
......@@ -785,8 +779,6 @@ def add_n(inputs, name=None):
def mm(input, mat2, name=None):
"""
:alias_main: paddle.mm
:alias: paddle.mm,paddle.tensor.mm,paddle.tensor.math.mm
Applies matrix multiplication to two tensors.
......@@ -799,41 +791,42 @@ def mm(input, mat2, name=None):
removed after matrix multiplication.
Args:
x (Variable): The input variable which is a Tensor or LoDTensor.
mat2 (Variable): The input variable which is a Tensor or LoDTensor.
x (Tensor): The input tensor.
mat2 (Tensor): The input tensor.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The product Tensor (or LoDTensor) variable.
Tensor: The product Tensor.
Examples:
.. code-block:: python
# Examples to clarify shapes of the inputs and output
# x: [B, ..., M, K], mat2: [B, ..., K, N]
# fluid.layers.matmul(x, mat2) # out: [B, ..., M, N]
# paddle.matmul(x, mat2) # out: [B, ..., M, N]
# x: [B, M, K], mat2: [B, K, N]
# fluid.layers.matmul(x, mat2) # out: [B, M, N]
# paddle.matmul(x, mat2) # out: [B, M, N]
# x: [B, M, K], mat2: [K, N]
# fluid.layers.matmul(x, mat2) # out: [B, M, N]
# paddle.matmul(x, mat2) # out: [B, M, N]
# x: [M, K], mat2: [K, N]
# fluid.layers.matmul(x, mat2) # out: [M, N]
# paddle.matmul(x, mat2) # out: [M, N]
# x: [B, M, K], mat2: [K]
# fluid.layers.matmul(x, mat2) # out: [B, M]
# paddle.matmul(x, mat2) # out: [B, M]
# x: [K], mat2: [K]
# fluid.layers.matmul(x, mat2) # out: [1]
# paddle.matmul(x, mat2) # out: [1]
import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2, 3], dtype='float32')
mat2 = fluid.data(name='mat2', shape=[3, 2], dtype='float32')
out = paddle.mm(x, mat2) # out shape is [2, 2]
x = paddle.rand(shape=[2, 3], dtype='float32')
y = paddle.rand(shape=[3, 2], dtype='float32')
out = paddle.mm(x, y)
print(out.shape) # [2, 2]
"""
if in_dygraph_mode():
out = _varbase_creator(dtype=input.dtype)
......@@ -921,7 +914,7 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
out = paddle.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )
print( out.numpy() )
print(out)
# [[10.5 10.5]
# [10.5 10.5]]
"""
......@@ -1085,7 +1078,7 @@ def max(x, axis=None, keepdim=False, name=None):
float64, int32, int64.
axis(list|int, optional): The axis along which the maximum is computed.
If :attr:`None`, compute the maximum over all elements of
`x` and return a Tensor variable with a single element,
`x` and return a Tensor with a single element,
otherwise must be in the range :math:`[-x.ndim(x), x.ndim(x))`.
If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
keepdim(bool, optional): Whether to reserve the reduced dimension in the
......@@ -1104,37 +1097,35 @@ def max(x, axis=None, keepdim=False, name=None):
import paddle
paddle.disable_static()
# data_x is a variable with shape [2, 4]
# data_x is a Tensor with shape [2, 4]
# the axis is a int element
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
result1 = paddle.max(x)
print(result1.numpy())
print(result1)
#[0.9]
result2 = paddle.max(x, axis=0)
print(result2.numpy())
print(result2)
#[0.2 0.3 0.6 0.9]
result3 = paddle.max(x, axis=-1)
print(result3.numpy())
print(result3)
#[0.9 0.7]
result4 = paddle.max(x, axis=1, keepdim=True)
print(result4.numpy())
print(result4)
#[[0.9]
# [0.7]]
# data_y is a variable with shape [2, 2, 2]
# data_y is a Tensor with shape [2, 2, 2]
# the axis is list
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.max(y, axis=[1, 2])
print(result5.numpy())
print(result5)
#[4. 8.]
result6 = paddle.max(y, axis=[0, 1])
print(result6.numpy())
print(result6)
#[7. 8.]
"""
......@@ -1179,7 +1170,7 @@ def min(x, axis=None, keepdim=False, name=None):
x(Tensor): A tensor, the data type is float32, float64, int32, int64.
axis(list|int, optional): The axis along which the minimum is computed.
If :attr:`None`, compute the minimum over all elements of
`x` and return a Tensor variable with a single element,
`x` and return a Tensor with a single element,
otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
keepdim(bool, optional): Whether to reserve the reduced dimension in the
......@@ -1198,35 +1189,33 @@ def min(x, axis=None, keepdim=False, name=None):
import paddle
paddle.disable_static()
# x is a tensor with shape [2, 4]
# the axis is a int element
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
result1 = paddle.min(x)
print(result1.numpy())
print(result1)
#[0.1]
result2 = paddle.min(x, axis=0)
print(result2.numpy())
print(result2)
#[0.1 0.2 0.5 0.7]
result3 = paddle.min(x, axis=-1)
print(result3.numpy())
print(result3)
#[0.2 0.1]
result4 = paddle.min(x, axis=1, keepdim=True)
print(result4.numpy())
print(result4)
#[[0.2]
# [0.1]]
# y is a variable with shape [2, 2, 2]
# y is a Tensor with shape [2, 2, 2]
# the axis is list
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.min(y, axis=[1, 2])
print(result5.numpy())
print(result5)
#[1. 5.]
result6 = paddle.min(y, axis=[0, 1])
print(result6.numpy())
print(result6)
#[1. 2.]
"""
......@@ -1265,6 +1254,7 @@ def min(x, axis=None, keepdim=False, name=None):
def log1p(x, name=None):
r"""
Calculates the natural log of the given input tensor, element-wise.
.. math::
Out = \\ln(x+1)
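A minimal sketch of the documented formula:

.. code-block:: python

import paddle

x = paddle.to_tensor([0.0, 1.0, 2.0])
print(paddle.log1p(x))  # [0., 0.69314718, 1.09861231]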
......@@ -1423,7 +1413,7 @@ def addcmul(input, tensor1, tensor2, value=1.0, name=None):
tensor1 = paddle.ones([2,2])
tensor2 = paddle.ones([2,2])
out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=0.5)
print(out.numpy())
print(out)
# [[1.5 1.5]
# [1.5 1.5]]
"""
......@@ -1442,8 +1432,6 @@ def addcmul(input, tensor1, tensor2, value=1.0, name=None):
def clip(x, min=None, max=None, name=None):
"""
**clip layer**
This operator clips all elements of the input into the range [min, max] and returns
a resulting tensor as the following equation:
......@@ -1468,6 +1456,7 @@ def clip(x, min=None, max=None, name=None):
.. code-block:: python
import paddle
x1 = paddle.to_tensor([[1.2, 3.5], [4.5, 6.4]], 'float32')
out1 = paddle.clip(x1, min=3.5, max=5.0)
out2 = paddle.clip(x1, min=2.5)
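# Expected values, following the clipping rule stated above:
# out1: [[3.5, 3.5], [4.5, 5.0]]
# out2: [[2.5, 3.5], [4.5, 6.4]]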
......@@ -1626,9 +1615,9 @@ def kron(x, y, name=None):
${comment}
Args:
x (Variable): the first operand of kron op, data type: float16, float32,
x (Tensor): the first operand of kron op, data type: float16, float32,
float64, int32 or int64.
y (Variable): the second operand of kron op, data type: float16,
y (Tensor): the second operand of kron op, data type: float16,
float32, float64, int32 or int64. Its data type should be the same
with x.
name(str, optional): The default value is None. Normally there is no
......@@ -1636,7 +1625,7 @@ ${comment}
refer to :ref:`api_guide_Name`.
Returns:
Variable: The output of kron op, data type: float16, float32, float64, int32 or int64. Its data is the same with x.
Tensor: The output of kron op, data type: float16, float32, float64, int32 or int64. Its data is the same with x.
Examples:
.. code-block:: python
......@@ -1755,10 +1744,10 @@ def isfinite(x, name=None):
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isfinite(x)
print(out.numpy()) # [False True True False True False False]
print(out) # [False True True False True False False]
"""
if in_dygraph_mode():
return core.ops.isfinite_v2(x)
......@@ -1784,10 +1773,9 @@ def isinf(x, name=None):
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isinf(x)
print(out.numpy()) # [ True False False True False False False]
print(out) # [ True False False True False False False]
"""
if in_dygraph_mode():
return core.ops.isinf_v2(x)
......@@ -1813,10 +1801,9 @@ def isnan(x, name=None):
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isnan(x)
print(out.numpy()) # [False False False False False True True]
print(out) # [False False False False False True True]
"""
if in_dygraph_mode():
return core.ops.isnan_v2(x)
......@@ -1947,10 +1934,9 @@ def tanh(x, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.tanh(x)
print(out.numpy())
print(out)
# [-0.37994896 -0.19737532 0.09966799 0.29131261]
"""
if in_dygraph_mode():
......@@ -2008,7 +1994,7 @@ def all(x, axis=None, keepdim=False, name=None):
x (Tensor): An N-D Tensor, the input data type should be `bool`.
axis (int|list|tuple, optional): The dimensions along which the ``logical and`` is computed. If
:attr:`None`, the ``logical and`` is computed over all elements of :attr:`x` and returns a
Tensor variable with a single element, otherwise must be in the
Tensor with a single element, otherwise must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
......@@ -2033,10 +2019,7 @@ def all(x, axis=None, keepdim=False, name=None):
import paddle.fluid.layers as layers
import numpy as np
# set as static mode
paddle.disable_static()
# x is a bool Tensor variable with following elements:
# x is a bool Tensor with following elements:
# [[True, False]
# [True, True]]
x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
......@@ -2107,7 +2090,7 @@ def any(x, axis=None, keepdim=False, name=None):
x (Tensor): An N-D Tensor, the input data type should be `bool`.
axis (int|list|tuple, optional): The dimensions along which the ``logical or`` is computed. If
:attr:`None`, the ``logical or`` is computed over all elements of :attr:`x` and returns a
Tensor variable with a single element, otherwise must be in the
Tensor with a single element, otherwise must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
......@@ -2132,10 +2115,7 @@ def any(x, axis=None, keepdim=False, name=None):
import paddle.fluid.layers as layers
import numpy as np
# set as static mode
paddle.disable_static()
# x is a bool Tensor variable with following elements:
# x is a bool Tensor with following elements:
# [[True, False]
# [False, False]]
x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
......
......@@ -39,9 +39,6 @@ from paddle.common_ops_import import *
def argsort(x, axis=-1, descending=False, name=None):
"""
:alias_main: paddle.argsort
:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
This OP sorts the input along the given axis, and returns the corresponding index tensor for the sorted output values. The default sort algorithm is ascending; if you want the sort algorithm to be descending, you must set :attr:`descending` to True.
Args:
......@@ -67,7 +64,6 @@ def argsort(x, axis=-1, descending=False, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([[[5,8,9,5],
[0,0,1,7],
[6,9,2,4]],
......@@ -78,21 +74,21 @@ def argsort(x, axis=-1, descending=False, name=None):
out1 = paddle.argsort(x=x, axis=-1)
out2 = paddle.argsort(x=x, axis=0)
out3 = paddle.argsort(x=x, axis=1)
print(out1.numpy())
print(out1)
#[[[0 3 1 2]
# [0 1 2 3]
# [2 3 0 1]]
# [[1 3 2 0]
# [0 1 2 3]
# [2 0 3 1]]]
print(out2.numpy())
print(out2)
#[[[0 1 1 1]
# [0 0 0 0]
# [1 1 1 0]]
# [[1 0 0 0]
# [1 1 1 1]
# [0 0 0 1]]]
print(out3.numpy())
print(out3)
#[[[1 1 1 2]
# [0 0 2 0]
# [2 2 0 1]]
......@@ -149,17 +145,16 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([[5,8,9,5],
[0,0,1,7],
[6,9,2,4]])
out1 = paddle.argmax(x)
print(out1.numpy()) # 2
print(out1) # 2
out2 = paddle.argmax(x, axis=1)
print(out2.numpy())
print(out2)
# [2 3 1]
out3 = paddle.argmax(x, axis=-1)
print(out3.numpy())
print(out3)
# [2 3 1]
"""
if axis is not None and not isinstance(axis, int):
......@@ -227,17 +222,16 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([[5,8,9,5],
[0,0,1,7],
[6,9,2,4]])
out1 = paddle.argmin(x)
print(out1.numpy()) # 4
print(out1) # 4
out2 = paddle.argmin(x, axis=1)
print(out2.numpy())
print(out2)
# [0 0 2]
out3 = paddle.argmin(x, axis=-1)
print(out3.numpy())
print(out3)
# [0 0 2]
"""
if axis is not None and not isinstance(axis, int):
......@@ -354,25 +348,23 @@ def nonzero(x, as_tuple=False):
Tensor. The data type is int64.
Examples:
.. code-block:: python
.. code-block:: python
import paddle
x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 3.0]])
[0.0, 2.0, 0.0],
[0.0, 0.0, 3.0]])
x2 = paddle.to_tensor([0.0, 1.0, 0.0, 3.0])
x3 = paddle.to_tensor([0.0, 0.0, 0.0])
out_z1 = paddle.nonzero(x1)
print(out_z1.numpy())
print(out_z1)
#[[0 0]
# [1 1]
# [2 2]]
out_z1_tuple = paddle.nonzero(x1, as_tuple=True)
for out in out_z1_tuple:
print(out.numpy())
print(out)
#[[0]
# [1]
# [2]]
......@@ -380,21 +372,15 @@ def nonzero(x, as_tuple=False):
# [1]
# [2]]
out_z2 = paddle.nonzero(x2)
print(out_z2.numpy())
print(out_z2)
#[[1]
# [3]]
out_z2_tuple = paddle.nonzero(x2, as_tuple=True)
for out in out_z2_tuple:
print(out.numpy())
print(out)
#[[1]
# [3]]
out_z3 = paddle.nonzero(x3)
print(out_z3.numpy())
#[]
out_z3_tuple = paddle.nonzero(x3, as_tuple=True)
for out in out_z3_tuple:
print(out.numpy())
#[]
"""
list_out = []
shape = x.shape
......@@ -419,8 +405,6 @@ def nonzero(x, as_tuple=False):
def sort(x, axis=-1, descending=False, name=None):
"""
:alias_main: paddle.sort
:alias: paddle.sort,paddle.tensor.sort,paddle.tensor.search.sort
This OP sorts the input along the given axis, and returns the sorted output tensor. The default sort algorithm is ascending; if you want the sort algorithm to be descending, you must set :attr:`descending` to True.
......@@ -439,10 +423,11 @@ def sort(x, axis=-1, descending=False, name=None):
Returns:
Tensor: sorted tensor(with the same shape and data type as ``x``).
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([[[5,8,9,5],
[0,0,1,7],
[6,9,2,4]],
......@@ -453,21 +438,21 @@ def sort(x, axis=-1, descending=False, name=None):
out1 = paddle.sort(x=x, axis=-1)
out2 = paddle.sort(x=x, axis=0)
out3 = paddle.sort(x=x, axis=1)
print(out1.numpy())
print(out1)
#[[[5. 5. 8. 9.]
# [0. 0. 1. 7.]
# [2. 4. 6. 9.]]
# [[2. 2. 4. 5.]
# [4. 7. 7. 9.]
# [0. 1. 6. 7.]]]
print(out2.numpy())
print(out2)
#[[[5. 2. 4. 2.]
# [0. 0. 1. 7.]
# [1. 7. 0. 4.]]
# [[5. 8. 9. 5.]
# [4. 7. 7. 9.]
# [6. 9. 2. 6.]]]
print(out3.numpy())
print(out3)
#[[[0. 0. 1. 4.]
# [5. 8. 2. 5.]
# [6. 9. 9. 7.]]
......@@ -610,7 +595,7 @@ def index_sample(x, index):
[500, 600, 700, 800],
[900, 1000, 1100, 1200]], dtype='int32')
out_z1 = paddle.index_sample(x, index)
print(out_z1.numpy())
print(out_z1)
#[[1. 2. 3.]
# [6. 7. 8.]
# [9. 9. 9.]]
......@@ -619,17 +604,17 @@ def index_sample(x, index):
# get the value of the element of the corresponding index in other tensors
top_value, top_index = paddle.topk(x, k=2)
out_z2 = paddle.index_sample(target, top_index)
print(top_value.numpy())
print(top_value)
#[[ 4. 3.]
# [ 8. 7.]
# [12. 11.]]
print(top_index.numpy())
print(top_index)
#[[3 2]
# [3 2]
# [3 2]]
print(out_z2.numpy())
print(out_z2)
#[[ 400 300]
# [ 800 700]
# [1200 1100]]
......@@ -673,7 +658,6 @@ def masked_select(x, mask, name=None):
import paddle
x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]])
......@@ -726,33 +710,31 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
import paddle
paddle.disable_static()
tensor_1 = paddle.to_tensor([1, 4, 5, 7])
value_1, indices_1 = paddle.topk(tensor_1, k=1)
print(value_1.numpy())
print(value_1)
# [7]
print(indices_1.numpy())
print(indices_1)
# [3]
tensor_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
value_2, indices_2 = paddle.topk(tensor_2, k=1)
print(value_2.numpy())
print(value_2)
# [[7]
# [6]]
print(indices_2.numpy())
print(indices_2)
# [[3]
# [1]]
value_3, indices_3 = paddle.topk(tensor_2, k=1, axis=-1)
print(value_3.numpy())
print(value_3)
# [[7]
# [6]]
print(indices_3.numpy())
print(indices_3)
# [[3]
# [1]]
value_4, indices_4 = paddle.topk(tensor_2, k=1, axis=0)
print(value_4.numpy())
print(value_4)
# [[2 6 5 7]]
print(indices_4.numpy())
print(indices_4)
# [[1 1 0 0]]
"""
......