From 7619188abe811034dc982266196a5cb7795b53d5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=82=85=E5=89=91=E5=AF=92?=
Date: Thu, 17 Nov 2022 17:48:38 +0800
Subject: [PATCH] =?UTF-8?q?(fluid=E6=B8=85=E7=90=86)remove=20swish=20in=20?=
 =?UTF-8?q?nn.py=20under=20fluid=20(#47891)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* remove swish in nn.py under fluid

* fix swish test case
---
 python/paddle/fluid/layers/nn.py              | 87 -------------------
 .../test_mkldnn_elt_act_fuse_pass.py          |  7 +-
 .../ir/inference/test_trt_activation_pass.py  |  7 +-
 .../tests/unittests/test_activation_op.py     |  2 +-
 4 files changed, 8 insertions(+), 95 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index ee85d94e267..01a2aa90ce8 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -134,7 +134,6 @@ __all__ = [
     'relu6',
     'pow',
     'hard_sigmoid',
-    'swish',
     'prelu',
     'brelu',
     'leaky_relu',
@@ -9979,92 +9978,6 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
     return out
 
 
-@templatedoc()
-def swish(x, beta=1.0, name=None):
-    r"""
-    :alias_main: paddle.nn.functional.swish
-    :alias: paddle.nn.functional.swish,paddle.nn.functional.activation.swish
-    :old_api: paddle.fluid.layers.swish
-
-    Elementwise swish activation function. See `Searching for Activation Functions <https://arxiv.org/abs/1710.05941>`_ for more details.
-
-    Equation:
-
-    .. math::
-        out = \\frac{x}{1 + e^{- beta * x}}
-
-    Args:
-
-        x(Variable): Tensor or LoDTensor, dtype: float32 or float64, the input of swish activation.
-
-        beta(float): Constant beta of swish operator, default 1.0.
-
-        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-
-        Variable: Output of the swish activation, Tensor or LoDTensor, with the same dtype and shape with the input x.
-
-    Examples:
-
-        .. code-block:: python
-
-            # declarative mode
-            import numpy as np
-            from paddle import fluid
-
-            x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
-            y = fluid.layers.swish(x, beta=2.0)
-
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            start = fluid.default_startup_program()
-            main = fluid.default_main_program()
-
-            data = np.random.randn(2, 3).astype("float32")
-            exe.run(start)
-            y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
-
-            data
-            # array([[-1.1239197 ,  1.3391294 ,  0.03921051],
-            #        [ 1.1970421 ,  0.02440812,  1.2055548 ]], dtype=float32)
-            y_np
-            # array([[-0.2756806 ,  1.0610548 ,  0.01998957],
-            #        [ 0.9193261 ,  0.01235299,  0.9276883 ]], dtype=float32)
-
-        .. code-block:: python
-
-            # imperative mode
-            import numpy as np
-            from paddle import fluid
-            import paddle.fluid.dygraph as dg
-
-            data = np.random.randn(2, 3).astype("float32")
-            place = fluid.CPUPlace()
-            with dg.guard(place) as g:
-                x = dg.to_variable(data)
-                y = fluid.layers.swish(x)
-                y_np = y.numpy()
-            data
-            # array([[-0.0816701 ,  1.1603649 , -0.88325626],
-            #        [ 0.7522361 ,  1.0978601 ,  0.12987892]], dtype=float32)
-            y_np
-            # array([[-0.03916847,  0.8835007 , -0.25835553],
-            #        [ 0.51126915,  0.82324016,  0.06915068]], dtype=float32)
-    """
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
-
-    helper = LayerHelper('swish', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='swish',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={'slope': beta},
-    )
-    return out
-
-
 @deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
 def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
     r"""
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
index c9647ec60b5..b926a8f71a9 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
@@ -92,8 +92,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Swish(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
-        self.act_alpha = 4
-        self.act = fluid.layers.swish
+        self.act = paddle.nn.functional.swish
 
 
 class ElementwiseActivationMkldnnFusePassTest_Add_HardSwish(
@@ -194,7 +193,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Swish(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_sub
-        self.act = fluid.layers.swish
+        self.act = paddle.nn.functional.swish
 
 
 class ElementwiseActivationMkldnnFusePassTest_Sub_HardSwish(
@@ -287,7 +286,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Swish(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_mul
-        self.act = fluid.layers.swish
+        self.act = paddle.nn.functional.swish
 
 
 class ElementwiseActivationMkldnnFusePassTest_Mul_HardSwish(
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
index 0f2a8a97430..80e51dbf261 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
@@ -21,6 +21,7 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.core import PassVersionChecker
 from paddle.fluid.core import AnalysisConfig
+import paddle
 
 
 class TensorRTSubgraphPassActivationTest(InferencePassTest):
@@ -118,7 +119,7 @@ class TensorRTSubgraphPassSwishTest(TensorRTSubgraphPassActivationTest):
         )
 
     def append_act(self, x):
-        return fluid.layers.swish(x)
+        return paddle.nn.functional.swish(x)
 
 
 class TensorRTSubgraphPassSwishFp16SerializeTest(
@@ -131,7 +132,7 @@ class TensorRTSubgraphPassSwishFp16SerializeTest(
         )
 
     def append_act(self, x):
-        return fluid.layers.swish(x)
+        return paddle.nn.functional.swish(x)
 
 
 class TensorRTSubgraphPassDynamicSwishFp16SerializeTest(
@@ -152,7 +153,7 @@ class TensorRTSubgraphPassDynamicSwishFp16SerializeTest(
         )
 
     def append_act(self, x):
-        return fluid.layers.swish(x)
+        return paddle.nn.functional.swish(x)
 
 
 class TensorRTSubgraphPassMishTest(TensorRTSubgraphPassActivationTest):
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 8c4ed1cc1fb..4a808796086 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -3551,7 +3551,7 @@ class TestSwishAPI(unittest.TestCase):
         paddle.enable_static()
         with fluid.program_guard(fluid.Program()):
             x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = fluid.layers.swish(x)
+            out = paddle.nn.functional.swish(x)
             exe = fluid.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_swish(self.x_np)
-- 
GitLab
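For downstream code that still calls the removed fluid.layers.swish, the replacement used
throughout this patch is paddle.nn.functional.swish. A minimal migration sketch, assuming
Paddle 2.x dygraph (note that paddle.nn.functional.swish takes no beta argument and computes
x * sigmoid(x), which is also why the act_alpha setting is dropped in the MKLDNN test above):

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    # Before this patch: y = fluid.layers.swish(x, beta=1.0)
    x = paddle.to_tensor(np.random.randn(2, 3).astype("float32"))
    y = F.swish(x)  # elementwise x * sigmoid(x)
    print(y.numpy())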