diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 0e6abd666797b14182e9f1be26d8b9aefb900b88..41766c6651aadf629da97e2ba805b75af3dbc9b6 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -131,7 +131,6 @@ __all__ = [
     'log',
     'crop',
     'crop_tensor',
-    'elu',
     'relu6',
     'pow',
     'stanh',
@@ -9923,49 +9922,6 @@ def pad2d(
     return out
 
 
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.elu")
-def elu(x, alpha=1.0, name=None):
-    """
-    :alias_main: paddle.nn.functional.elu
-    :alias: paddle.nn.functional.elu,paddle.nn.functional.activation.elu
-    :old_api: paddle.fluid.layers.elu
-
-    ${comment}
-    Args:
-        x(${x_type}): ${x_comment}
-        alpha(${alpha_type}|1.0): ${alpha_comment}
-        name(str|None): The default value is None. Normally there is no need for user to set this property.
-                        For more information, please refer to :ref:`api_guide_Name`.
-    Returns:
-        ${out_type}: ${out_comment}
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            import numpy as np
-
-            input_elu = np.array([[-1,6],[1,15.6]])
-            with fluid.dygraph.guard():
-                x = fluid.dygraph.to_variable(input_elu)
-                y = fluid.layers.elu(x, alpha=0.2)
-                print(y.numpy())
-                # [[-0.12642411  6.        ]
-                #  [ 1.         15.6      ]]
-    """
-    helper = LayerHelper('elu', **locals())
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='elu',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={'alpha': alpha},
-    )
-    return out
-
-
 @deprecated(since="2.0.0", update_to="paddle.nn.functional.relu6")
 def relu6(x, threshold=6.0, name=None):
     """
diff --git a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
index 38a894755f464b101f1769c47bc9582064e07c2a..cee448b0648f59c15df3824637f98fb848d7b33a 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
@@ -252,7 +252,7 @@ class TestELUDoubleGradCheck(unittest.TestCase):
         x = layers.data('x', shape, False, dtype)
         x.persistable = True
 
-        y = layers.elu(x, alpha=alpha)
+        y = paddle.nn.functional.elu(x, alpha=alpha)
         np.random.RandomState(SEED)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         gradient_checker.double_grad_check(
diff --git a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
index 1048c6710d27ccdec223e3394d6dc594d8c5b9d9..ff033d51efdbd369fcdb904c59f2596d8ef06328 100644
--- a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
@@ -80,7 +80,7 @@ class TestInplaceANBOpTraining(unittest.TestCase):
                 if activation == 'leaky_relu':
                     bn = fluid.layers.leaky_relu(bn, alpha)
                 if activation == 'elu':
-                    bn = fluid.layers.elu(bn, alpha)
+                    bn = paddle.nn.functional.elu(bn, alpha)
 
                 # NOTE: in inplace mode input and output of bn
                 # may have same name, multiply 1. to generate
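
For downstream code still calling the removed `fluid.layers.elu`, here is a minimal migration sketch, assuming Paddle 2.x where dynamic graph mode is the default. The sample input reproduces the docstring example deleted by this patch; only the call site changes.

```python
# A minimal migration sketch, assuming Paddle 2.x (dygraph mode by default).
# The input values mirror the docstring example removed by this diff.
import numpy as np
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor(np.array([[-1.0, 6.0], [1.0, 15.6]], dtype="float32"))

# Before this patch: y = fluid.layers.elu(x, alpha=0.2)
# After: paddle.nn.functional.elu computes x if x > 0, else alpha * (exp(x) - 1)
y = F.elu(x, alpha=0.2)
print(y.numpy())
# [[-0.12642412  6.        ]
#  [ 1.         15.6      ]]
```

The same replacement works on static-graph `Variable`s, which is why the two test files above only needed their call sites updated from `layers.elu(...)` / `fluid.layers.elu(...)` to `paddle.nn.functional.elu(...)`.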