Commit 055df470 authored by yuyang18

Polish code

Parent: cbc1b7f1
@@ -275,7 +275,7 @@ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
                    "The value of threshold for HardShrink. [default: 0.5]")
         .SetDefault(0.5f);
     AddComment(R"DOC(
-** HardShrink activation operator **
+:strong:`HardShrink activation operator`
 
 .. math::
     out = \begin{cases}
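
The diff view truncates the formula, but HardShrink's documented behavior is standard: zero out values whose magnitude is at most the threshold (default 0.5) and pass the rest through. A minimal NumPy sketch of those semantics, not the operator kernel:

    import numpy as np

    def hard_shrink(x, threshold=0.5):
        # out = x where |x| > threshold, 0 elsewhere.
        return np.where(np.abs(x) > threshold, x, 0.0)

    # e.g. hard_shrink(np.array([-1.0, -0.3, 0.2, 0.9])) -> [-1.0, 0.0, 0.0, 0.9]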
@@ -394,10 +394,11 @@ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "Input of ThresholdedRelu operator");
     AddOutput("Out", "Output of ThresholdedRelu operator");
-    AddAttr<float>("threshold", "The threshold location of activation")
+    AddAttr<float>("threshold",
+                   "The threshold location of activation. [default 1.0].")
         .SetDefault(1.0f);
     AddComment(R"DOC(
-ThresholdedRelu Activation Operator.
+:strong:`ThresholdedRelu activation operator`
 
 .. math::
...
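
The hunk cuts off before the formula, but ThresholdedRelu's documented behavior is standard: pass a value through only when it exceeds the threshold (default 1.0), otherwise output zero. A minimal NumPy sketch of those semantics, not the operator kernel:

    import numpy as np

    def thresholded_relu(x, threshold=1.0):
        # out = x where x > threshold, 0 elsewhere.
        return np.where(x > threshold, x, 0.0)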
@@ -94,7 +94,7 @@ class RowConvOpMaker : public framework::OpProtoAndCheckerMaker {
           "in this LodTensor is a matrix with shape T x N, i.e., the "
           "same shape as X.");
     AddComment(R"DOC(
-** Row-convolution operator **
+:strong:`Row-convolution operator`
 
 The row convolution is called lookahead convolution. This operator was
 introduced in the following paper for DeepSpeech2:
...
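
Row (lookahead) convolution, per the DeepSpeech2 reference above, mixes each time step with a small window of future steps using a per-feature filter. A rough NumPy sketch under that reading (shapes follow the T x N description in the hunk; the real kernel and padding details may differ):

    import numpy as np

    def row_conv(x, w):
        # x: (T, N) input sequence; w: (k, N) filter over the current
        # and next k-1 time steps, zero-padded past t = T-1.
        T = x.shape[0]
        k = w.shape[0]
        out = np.zeros_like(x)
        for i in range(k):
            out[:T - i] += w[i] * x[i:]
        return out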
@@ -40,7 +40,6 @@ __activations__ = [
     'relu6',
     'pow',
     'stanh',
-    'thresholded_relu',
     'hard_sigmoid',
     'swish',
 ]
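
For context, every name in __activations__ is turned into a layer function automatically; removing 'thresholded_relu' here takes it out of that path so it can be wrapped by hand below with a documented threshold argument. The generation step works roughly like this (a sketch assuming the loop ops.py uses for these names; the exact code may differ):

    for _OP in set(__activations__):
        globals()[_OP] = generate_layer_fn(_OP)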
@@ -91,8 +90,7 @@ def uniform_random(shape, dtype=None, min=None, max=None, seed=None):
     return _uniform_random_(**kwargs)
 
-uniform_random.__doc__ = _uniform_random_.__doc__ + "\n" \
-    + """
+uniform_random.__doc__ = _uniform_random_.__doc__ + """
 Examples:
 
     >>> result = fluid.layers.uniform_random(shape=[32, 784])
@@ -112,8 +110,7 @@ def hard_shrink(x, threshold=None):
     return _hard_shrink_(**kwargs)
 
-hard_shrink.__doc__ = _hard_shrink_.__doc__ + "\n" \
-    + """
+hard_shrink.__doc__ = _hard_shrink_.__doc__ + """
 Examples:
 
     >>> data = fluid.layers.data(name="input", shape=[784])
@@ -141,3 +138,25 @@ Examples:
 
     >>> data = fluid.layers.data(name="input", shape=[32, 784])
     >>> result = fluid.layers.cumsum(data, axis=0)
 """
+
+__all__ += ['thresholded_relu']
+
+_thresholded_relu_ = generate_layer_fn('thresholded_relu')
+
+
+def thresholded_relu(x, threshold=None):
+    kwargs = dict()
+    for name in locals():
+        val = locals()[name]
+        if val is not None:
+            kwargs[name] = val
+
+    return _thresholded_relu_(**kwargs)
+
+
+thresholded_relu.__doc__ = _thresholded_relu_.__doc__ + """
+Examples:
+
+    >>> data = fluid.layers.data(name="input", shape=[1])
+    >>> result = fluid.layers.thresholded_relu(data, threshold=0.4)
+"""