Unverified Commit ef51bbfd authored by 傅剑寒, committed by GitHub

remove fluid.layers.soft_relu in nn.py under fluid (#47925)

Parent 8d08c9e0
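Since this commit removes `fluid.layers.soft_relu`, code that still needs the operation can reproduce it with public Paddle APIs. The snippet below is a minimal sketch, assuming Paddle 2.x in dynamic-graph mode; the helper name `soft_relu` is illustrative only and is not part of the Paddle API after this change.

```python
# Minimal sketch of an equivalent computation using public Paddle ops.
# Assumes Paddle 2.x dynamic-graph mode; `soft_relu` here is a local
# helper, not a paddle.fluid.layers API (it no longer exists there).
import paddle


def soft_relu(x, threshold=40.0):
    # out = ln(1 + exp(clip(x, -threshold, threshold))), matching the
    # formula in the removed docstring.
    clipped = paddle.clip(x, min=-threshold, max=threshold)
    return paddle.log(1.0 + paddle.exp(clipped))


x = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]], dtype="float32")
print(soft_relu(x, threshold=20.0))
# Expected to be close to [[0.6931472, 1.3132616], [2.126928, 3.0485873]]
```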
......@@ -136,7 +136,6 @@ __all__ = [
'prelu',
'brelu',
'leaky_relu',
'soft_relu',
'flatten',
'stack',
'pad2d',
......@@ -10156,57 +10155,6 @@ def leaky_relu(x, alpha=0.02, name=None):
return paddle.nn.functional.leaky_relu(x, alpha, name)
def soft_relu(x, threshold=40.0, name=None):
r"""
SoftRelu Activation Operator.
$out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$
Args:
x(Variable): Input of soft_relu operator. Data type can be float32, float64.
threshold(float, optional): The threshold value of soft_relu, default value being 40.0.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
        Variable (Tensor|LoDTensor): Output of soft_relu operator, shape and LoD same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
            import numpy as np
import paddle
paddle.enable_static()
inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
output = fluid.layers.soft_relu(inputs, threshold=20.0)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.array([[0, 1],[2, 3]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([[0.6931472, 1.3132616], [2.126928 , 3.0485873]], dtype=float32)]
"""
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'soft_relu'
)
helper = LayerHelper('soft_relu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='soft_relu',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold},
)
return out
def flatten(x, axis=1, name=None):
r"""
**Flatten op**
......
......@@ -2238,19 +2238,6 @@ class TestSoftRelu(TestActivation):
self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestSoftReluOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program()):
# The input type must be Variable.
self.assertRaises(TypeError, fluid.layers.soft_relu, 1)
# The input dtype must be float16, float32, float64.
x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, fluid.layers.soft_relu, x_int32)
# support the input dtype is float16
x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
fluid.layers.soft_relu(x_fp16)
def elu(x, alpha):
out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
return out_ref.astype(x.dtype)
......
......@@ -3689,14 +3689,6 @@ class TestBook(LayerTest):
)
return out
def make_soft_relu(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
input = self._get_data(name="input", shape=[16], dtype="float32")
out = layers.soft_relu(input, threshold=30.0, name='soft_relu')
return out
def make_sigmoid(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
......