Commit 055df470 authored by yuyang18

Polish code

Parent cbc1b7f1
@@ -275,7 +275,7 @@ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
                   "The value of threshold for HardShrink. [default: 0.5]")
        .SetDefault(0.5f);
    AddComment(R"DOC(
-** HardShrink activation operator **
+:strong:`HardShrink activation operator`

.. math::
    out = \begin{cases}
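The diff context cuts the HardShrink formula off at this point. For orientation only (not part of this commit), the standard HardShrink rule keeps an element whose magnitude exceeds the threshold and zeroes it otherwise; a minimal NumPy sketch of that definition:

```python
import numpy as np

def hard_shrink_ref(x, threshold=0.5):
    # Keep x where |x| > threshold (0.5 is the default set above), else output 0.
    return np.where(np.abs(x) > threshold, x, 0.0)

print(hard_shrink_ref(np.array([-1.0, -0.2, 0.3, 0.8])))  # -> [-1.   0.   0.   0.8]
```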
@@ -394,15 +394,16 @@ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
  void Make() override {
    AddInput("X", "Input of ThresholdedRelu operator");
    AddOutput("Out", "Output of ThresholdedRelu operator");
-    AddAttr<float>("threshold", "The threshold location of activation")
+    AddAttr<float>("threshold",
+                   "The threshold location of activation. [default 1.0].")
        .SetDefault(1.0f);
    AddComment(R"DOC(
-ThresholdedRelu Activation Operator.
+:strong:`ThresholdedRelu activation operator`

.. math::

    out = \begin{cases}
-    x, \text{if } x > threshold \\
+        x, \text{if } x > threshold \\
        0, \text{otherwise}
    \end{cases}
)DOC");
@@ -94,7 +94,7 @@ class RowConvOpMaker : public framework::OpProtoAndCheckerMaker {
              "in this LodTensor is a matrix with shape T x N, i.e., the "
              "same shape as X.");
    AddComment(R"DOC(
-** Row-convolution operator **
+:strong:`Row-convolution operator`

The row convolution is called lookahead convolution. This operator was
introduced in the following paper for DeepSpeech2:
@@ -40,7 +40,6 @@ __activations__ = [
    'relu6',
    'pow',
    'stanh',
-    'thresholded_relu',
    'hard_sigmoid',
    'swish',
]
@@ -91,8 +90,7 @@ def uniform_random(shape, dtype=None, min=None, max=None, seed=None):
    return _uniform_random_(**kwargs)

-uniform_random.__doc__ = _uniform_random_.__doc__ + "\n" \
-    + """
+uniform_random.__doc__ = _uniform_random_.__doc__ + """
Examples:

    >>> result = fluid.layers.uniform_random(shape=[32, 784])
@@ -112,8 +110,7 @@ def hard_shrink(x, threshold=None):
    return _hard_shrink_(**kwargs)

-hard_shrink.__doc__ = _hard_shrink_.__doc__ + "\n" \
-    + """
+hard_shrink.__doc__ = _hard_shrink_.__doc__ + """
Examples:

    >>> data = fluid.layers.data(name="input", shape=[784])
@@ -141,3 +138,25 @@ Examples:
    >>> data = fluid.layers.data(name="input", shape=[32, 784])
    >>> result = fluid.layers.cumsum(data, axis=0)
"""
+
+__all__ += ['thresholded_relu']
+
+_thresholded_relu_ = generate_layer_fn('thresholded_relu')
+
+
+def thresholded_relu(x, threshold=None):
+    kwargs = dict()
+    for name in locals():
+        val = locals()[name]
+        if val is not None:
+            kwargs[name] = val
+
+    return _thresholded_relu_(**kwargs)
+
+
+thresholded_relu.__doc__ = _thresholded_relu_.__doc__ + """
+Examples:
+
+    >>> data = fluid.layers.data(name="input", shape=[1])
+    >>> result = fluid.layers.thresholded_relu(data, threshold=0.4)
+"""