Commit df4b94d1 authored by zhupengyang, committed by hong19860320

fix APIs: relu, relu6, hash (#20416) (#20535)

* fix APIs: relu, relu6, hash

test=develop
test=document_fix

* fix relu6 doc

test=develop
test=document_fix

* fix API.spec

test=develop
test=document_fix

* add description link for hash

test=develop
test=document_fix
Parent 5cb959e4
......@@ -216,7 +216,7 @@ paddle.fluid.layers.scatter_nd (ArgSpec(args=['index', 'updates', 'shape', 'name
paddle.fluid.layers.sequence_scatter (ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'abe3f714120117a5a3d3e639853932bf'))
paddle.fluid.layers.random_crop (ArgSpec(args=['x', 'shape', 'seed'], varargs=None, keywords=None, defaults=(None,)), ('document', '44f35002962cf24e14dd2958f6584e3d'))
paddle.fluid.layers.mean_iou (ArgSpec(args=['input', 'label', 'num_classes'], varargs=None, keywords=None, defaults=None), ('document', 'dea29c0c3cdbd5b498afef60e58c9d7c'))
paddle.fluid.layers.relu (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '0942c174f4f6fb274976d4357356f6a2'))
paddle.fluid.layers.relu (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c46011e3d848bc3dc650772b25fbbf10'))
paddle.fluid.layers.selu (ArgSpec(args=['x', 'scale', 'alpha', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '3ee40bc474b4bccdaf112d3f0d847318'))
paddle.fluid.layers.log (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7dc5fe72f1b3f0b6d903a2594de2521d'))
paddle.fluid.layers.crop (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '32196a194f757b4da114a595a5bc6414'))
......@@ -224,7 +224,7 @@ paddle.fluid.layers.crop_tensor (ArgSpec(args=['x', 'shape', 'offsets', 'name'],
paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6d49ba251e23f32cb09df54a851bb960'))
paddle.fluid.layers.margin_rank_loss (ArgSpec(args=['label', 'left', 'right', 'margin', 'name'], varargs=None, keywords=None, defaults=(0.1, None)), ('document', '1a177f30e5013fae7ee6c45860cf4946'))
paddle.fluid.layers.elu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '06f669c66b3b2cc1cef01dfb42a40f08'))
paddle.fluid.layers.relu6 (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(6.0, None)), ('document', '538fc860b2a1734e118b94e4a1a3ee67'))
paddle.fluid.layers.relu6 (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(6.0, None)), ('document', '56d3bb2e38cd4de769267c0ace5bbac2'))
paddle.fluid.layers.pow (ArgSpec(args=['x', 'factor', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '00d437d1e0d9450ea75a0495b93b54a7'))
paddle.fluid.layers.stanh (ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], varargs=None, keywords=None, defaults=(0.67, 1.7159, None)), ('document', 'd3f742178a7263adf5929153d104883d'))
paddle.fluid.layers.hard_sigmoid (ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None)), ('document', '607d79ca873bee40eed1c79a96611591'))
......@@ -278,7 +278,7 @@ paddle.fluid.layers.affine_grid (ArgSpec(args=['theta', 'out_shape', 'name'], va
paddle.fluid.layers.sequence_reverse (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '5b32ed21ab89140a8e758002923a0da3'))
paddle.fluid.layers.affine_channel (ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name', 'act'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None, None)), ('document', 'ecc4b1323028bde0518d666882d03515'))
paddle.fluid.layers.similarity_focus (ArgSpec(args=['input', 'axis', 'indexes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '57256fcb1119dc35ae031889fa601d61'))
paddle.fluid.layers.hash (ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', 'a0b73c21be618cec0281e7903039e5e3'))
paddle.fluid.layers.hash (ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', '1783e177b73f7049babe2f5a1ba418f9'))
paddle.fluid.layers.grid_sampler (ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '90c74742f48c70b103f1fbb9eb129066'))
paddle.fluid.layers.log_loss (ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(0.0001, None)), ('document', 'ef1701e11d60508fe8f02dd2a8c60bdf'))
paddle.fluid.layers.add_position_encoding (ArgSpec(args=['input', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'bd8b28e6c1640b13a42b0524f86f7800'))
......
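The three API.spec changes above touch only the second element of each tuple: the ArgSpec stays identical and only the recorded document hash moves, which is what a documentation-only change looks like in this file. A rough sketch of that idea (assuming an md5-of-docstring scheme; the exact tooling Paddle uses to generate these digests may differ):

.. code-block:: python

    import hashlib

    def doc_digest(docstring):
        # Hypothetical reproduction of the API.spec document hash:
        # a digest computed from the docstring text alone, so editing
        # the documentation (and nothing else) changes the recorded value.
        return hashlib.md5(docstring.encode("utf-8")).hexdigest()

    print(doc_digest("old relu docstring"))
    print(doc_digest("new relu docstring"))  # same ArgSpec, different digest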
......@@ -490,14 +490,19 @@ $out = \max(0, x) + \min(0, \alpha * (e^x - 1))$
class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "Input of Relu6 operator");
AddOutput("Out", "Output of Relu6 operator");
AddAttr<float>("threshold", "The threshold value of Relu6")
AddInput("X",
"Input of relu6 operator, an N-D Tensor, "
"with data type float32, float64.");
AddOutput(
"Out",
"Output of relu6 operator, a Tensor with the same shape as input.");
AddAttr<float>("threshold",
"The threshold value of Relu6. Default is 6.0. ")
.SetDefault(6.0f);
AddComment(R"DOC(
Relu6 Activation Operator.
$out = \min(\max(0, x), 6)$
$out = \min(\max(0, x), threshold)$
)DOC");
}
......
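The comment block now documents the formula in terms of the threshold attribute instead of the literal 6. A minimal NumPy sketch of that elementwise formula, out = min(max(0, x), threshold), for illustration only (not Paddle's kernel):

.. code-block:: python

    import numpy as np

    def relu6_reference(x, threshold=6.0):
        # Elementwise out = min(max(0, x), threshold), as in the op comment.
        return np.minimum(np.maximum(x, 0.0), threshold)

    x = np.array([[-1.0, 0.0], [2.5, 7.8]])
    print(relu6_reference(x))
    # [[0.  0. ]
    #  [2.5 6. ]]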
......@@ -10797,32 +10797,34 @@ def log(x, name=None):
return out
@templatedoc()
def relu(x, name=None):
"""
Relu takes one input data (Tensor) and produces one output data (Tensor)
where the rectified linear function, y = max(0, x), is applied to
the tensor elementwise.
.. math::
Out = \\max(0, x)
${comment}
Args:
x (Variable): The input tensor.
name (str|None, default None): A name for this layer If set None,
the layer will be named automatically.
x(Variable): ${x_comment}
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: The output tensor with the same shape as input.
Variable: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
output = fluid.layers.relu(x)
"""
import numpy as np
in1 = np.array([[-1,0],[1,2.6]])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.relu(x1)
print(out1.numpy())
# [[0. 0. ]
# [1. 2.6]]
"""
helper = LayerHelper('relu', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
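The new docstring example runs relu in dygraph mode. The layer is used the same way in the static-graph workflow; a minimal sketch, assuming the Paddle 1.x fluid API and a hypothetical feed variable named "x":

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.data(name="x", shape=[2, 2], dtype="float32")
    y = fluid.layers.relu(x)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    out, = exe.run(feed={"x": np.array([[-1., 0.], [1., 2.6]], dtype="float32")},
                   fetch_list=[y])
    print(out)
    # [[0.  0. ]
    #  [1.  2.6]]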
......@@ -11584,11 +11586,13 @@ def elu(x, alpha=1.0, name=None):
def relu6(x, threshold=6.0, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
threshold(${threshold_type}|6.0): ${threshold_comment}
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
threshold(float, optional): ${threshold_comment}
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
output(${out_type}): ${out_comment}
......@@ -11598,8 +11602,14 @@ def relu6(x, threshold=6.0, name=None):
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[3,10,32,32], dtype="float32")
y = fluid.layers.relu6(x, threshold=6.0)
import numpy as np
in1 = np.array([[-1,0],[2.5,7.8]])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.relu6(x=x1, threshold=6.0)
print(out1.numpy())
# [[0. 0. ]
# [2.5 6. ]]
"""
helper = LayerHelper('relu6', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
......@@ -14755,64 +14765,49 @@ def similarity_focus(input, axis, indexes, name=None):
def hash(input, hash_size, num_hash=1, name=None):
"""
Hash the input to an integer whose value is less than the given hash size.
This OP hashes the input to an integer less than hash_size.
The hash algorithm we used was xxHash - Extremely fast hash algorithm
(https://github.com/Cyan4973/xxHash/tree/v0.6.5)
A simple example as below:
.. code-block:: text
Given:
# shape [2, 2]
input.data =
[[1, 2],
[3, 4]]
hash_size = 10000
num_hash = 4
Then:
Hash op will take all number in input's 2nd dimension as hash algorithm's
input for each time. Each input will be hashed for 4 times, and get an
array whose length is 4. Each value in the array ranges from 0 to 9999.
# shape [2, 4]
output.data = [
[[9662, 9217, 1129, 8487],
[8310, 1327, 1654, 4567]],
]
Args:
input (Variable): The input variable which is a one-hot word. The
dimensions of the input variable must be 2. Both Tensor and LoDTensor are supported.
hash_size (int): The space size for hash algorithm. The output value
will keep in the range:math:`[0, hash_size - 1]`.
num_hash (int): The times of hash, default 1.
name (str, default None): The name of this layer.
input(Variable): A **Two-Dimensional** LoDTensor with type int32, int64.
**Only support LoDTensor**.
num_hash(int, optional): The times of hash, default is 1.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: The hash result variable, which the same variable type as `input`.
Variable: A LoDTensor with the same data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# titles has shape [batch, 1]
titles = fluid.layers.data(name='titles', shape=[1], dtype='int32', lod_level=0)
# hash_r has shape [batch, 2]
hash_r = fluid.layers.hash(name='hash_x', input=titles, num_hash=2, hash_size=1000)
place = fluid.core.CPUPlace()
x = fluid.data(name="x", shape=[1], dtype="int32", lod_level=1)
res = fluid.layers.hash(name="res",input=x, hash_size=1000, num_hash=4)
# titles has shape [batch, 1] and lod information
titles = fluid.layers.data(name='titles', shape=[1], dtype='int32', lod_level=1)
# hash_r has shape [batch, 2] and inherits lod information from titles
hash_r = fluid.layers.hash(name='hash_x', input=titles, num_hash=2, hash_size=1000)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
in1 = np.array([[1,2],[3,4]]).astype("int32")
print(in1)
x_i = fluid.core.LoDTensor()
x_i.set(in1,place)
x_i.set_recursive_sequence_lengths([[0,2]])
res = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res], return_numpy=False)
print(np.array(res[0]))
# [[[722]
# [407]
# [337]
# [395]]
# [[603]
# [590]
# [386]
# [901]]]
"""
helper = LayerHelper('hash', **locals())
out = helper.create_variable_for_type_inference(
......
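The example above feeds a LoDTensor and gets back, for every input row, num_hash integers, each smaller than hash_size. A rough pure-Python illustration of that bucketing idea (hashlib is used here only as a stand-in; the op itself uses xxHash, so the concrete bucket ids will not match Paddle's output):

.. code-block:: python

    import hashlib
    import numpy as np

    def hash_buckets(row, hash_size, num_hash):
        # One bucket id per hash round, each kept in [0, hash_size - 1]
        # by taking the digest modulo hash_size. Paddle's kernel hashes
        # the row bytes with xxHash; md5 here is only illustrative.
        buckets = []
        for seed in range(num_hash):
            digest = hashlib.md5(bytes([seed]) + row.tobytes()).digest()
            buckets.append(int.from_bytes(digest[:8], "little") % hash_size)
        return buckets

    data = np.array([[1, 2], [3, 4]], dtype=np.int32)
    for row in data:
        print(hash_buckets(row, hash_size=1000, num_hash=4))
    # Each row maps to 4 integers, all in [0, 999].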