From 1175a2b9977f3533c1da5bf47f23d578a2e13cd5 Mon Sep 17 00:00:00 2001
From: Vvsmile <450864116@qq.com>
Date: Mon, 21 Nov 2022 12:42:14 +0800
Subject: [PATCH] Remove API: selu (#47969)

replace paddle.fluid.layers.selu with paddle.nn.functional.selu
---
 python/paddle/fluid/layers/nn.py              | 73 -------------------
 .../fluid/tests/unittests/test_selu_op.py     |  2 +-
 2 files changed, 1 insertion(+), 74 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 0ccd6ea0074..3e8479acfb9 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -124,7 +124,6 @@ __all__ = [
     'random_crop',
     'mean_iou',
     'relu',
-    'selu',
     'log',
     'crop',
     'crop_tensor',
@@ -9074,78 +9073,6 @@ def relu(x, name=None):
     return out
 
 
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.selu")
-def selu(x, scale=None, alpha=None, name=None):
-    r"""
-
-    Selu Operator.
-
-    The equation is:
-
-    .. math::
-        selu = \\lambda *
-        \\begin{cases}
-            x &\\quad \\text{ if } x>0 \\\\
-            \\alpha * e^x - \\alpha &\\quad \\text{ if } x<=0
-        \\end{cases}
-
-
-    The input `X` can carry the LoD (Level of Details) information,
-    or not. And the output shares the LoD information with input `X`.
-
-    Args:
-        x (Variable): The input N-D Tensor.
-        scale (float, optional): lambda in the selu activation function,
-            the default value is 1.0507009873554804934193349852946.
-            For more information about this value, please refer
-            to: https://arxiv.org/abs/1706.02515.
-        alpha (float, optional): alpha in the selu activation function,
-            the default value is 1.6732632423543772848170429916717.
-            For more information about this value, please refer
-            to: https://arxiv.org/abs/1706.02515.
-        name (str, optional): The default value is None. Normally there is
-            no need for the user to set this property. For more information,
-            please refer to :ref:`api_guide_Name`.
-
-
-    Returns:
-        Variable(Tensor|LoDTensor): The output Tensor or LoDTensor with the
-            same shape and LoD information as the input.
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-            import numpy as np
-            paddle.enable_static()
-
-            inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
-            output = fluid.layers.selu(inputs)
-
-            exe = fluid.Executor(fluid.CPUPlace())
-            exe.run(fluid.default_startup_program())
-
-            img = np.array([[0, 1], [2, 3]]).astype(np.float32)
-
-            res = exe.run(fluid.default_main_program(), feed={'x': img}, fetch_list=[output])
-            print(res)  # [array([[0.      , 1.050701], [2.101402, 3.152103]], dtype=float32)]
-    """
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'selu')
-
-    helper = LayerHelper('selu', **locals())
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    attrs = {}
-    if scale is not None:
-        attrs["scale"] = scale
-    if alpha is not None:
-        attrs["alpha"] = alpha
-
-    helper.append_op(
-        type="selu", inputs={"X": x}, outputs={"Out": out}, attrs=attrs
-    )
-    return out
-
-
 def mean_iou(input, label, num_classes):
     r"""
     Mean Intersection-Over-Union is a common evaluation metric for
diff --git a/python/paddle/fluid/tests/unittests/test_selu_op.py b/python/paddle/fluid/tests/unittests/test_selu_op.py
index 1390ddb5c96..381f3aa5ef5 100644
--- a/python/paddle/fluid/tests/unittests/test_selu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_selu_op.py
@@ -118,7 +118,7 @@ class TestSeluAPI(unittest.TestCase):
     def test_fluid_api(self):
         with fluid.program_guard(fluid.Program()):
             x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = fluid.layers.selu(x, self.scale, self.alpha)
+            out = F.selu(x, self.scale, self.alpha)
             exe = fluid.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
             out_ref = ref_selu(self.x_np, self.scale, self.alpha)
-- 
GitLab
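
Migration note (not part of the patch): a minimal sketch of the replacement call in dynamic-graph mode. The input values are taken from the removed docstring example; paddle.nn.functional.selu accepts the same scale and alpha parameters that the deleted fluid.layers.selu exposed, with the same defaults.

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    # SELU computes scale * x for x > 0 and scale * alpha * (e^x - 1)
    # otherwise; the default scale/alpha are the self-normalizing
    # constants from https://arxiv.org/abs/1706.02515.
    x = paddle.to_tensor(np.array([[0.0, 1.0], [2.0, 3.0]], dtype=np.float32))
    out = F.selu(x)  # defaults: scale~1.0507009873554805, alpha~1.6732632423543772
    print(out.numpy())
    # [[0.       1.050701]
    #  [2.101402 3.152103]]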