Unverified commit 7619188a, authored by 傅剑寒, committed by GitHub

(fluid cleanup) remove swish in nn.py under fluid (#47891)

* remove swish in nn.py under fluid

* fix swish test case
Parent 209f684c
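For anyone hitting this removal downstream, the migration is mechanical: every `fluid.layers.swish(x)` call site in the diff below becomes `paddle.nn.functional.swish(x)`. A minimal before/after sketch, assuming Paddle 2.x in dygraph mode (the input values are illustrative):

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([-1.0, 0.0, 1.0])

    # Before this commit (static-graph fluid API, now removed):
    #     y = fluid.layers.swish(x)
    # After:
    y = F.swish(x)  # swish(x) = x * sigmoid(x)
    print(y.numpy())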
@@ -134,7 +134,6 @@ __all__ = [
     'relu6',
     'pow',
     'hard_sigmoid',
-    'swish',
     'prelu',
     'brelu',
     'leaky_relu',
@@ -9979,92 +9978,6 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
     return out
 
 
-@templatedoc()
-def swish(x, beta=1.0, name=None):
-    r"""
-    :alias_main: paddle.nn.functional.swish
-    :alias: paddle.nn.functional.swish,paddle.nn.functional.activation.swish
-    :old_api: paddle.fluid.layers.swish
-
-    Elementwise swish activation function. See `Searching for Activation Functions <https://arxiv.org/abs/1710.05941>`_ for more details.
-
-    Equation:
-
-    .. math::
-        out = \\frac{x}{1 + e^{- beta * x}}
-
-    Args:
-        x(Variable): Tensor or LoDTensor, dtype: float32 or float64, the input of swish activation.
-        beta(float): Constant beta of swish operator, default 1.0.
-        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        Variable: Output of the swish activation, Tensor or LoDTensor, with the same dtype and shape with the input x.
-
-    Examples:
-
-        .. code-block:: python
-
-            # declarative mode
-            import numpy as np
-            from paddle import fluid
-
-            x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
-            y = fluid.layers.swish(x, beta=2.0)
-
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            start = fluid.default_startup_program()
-            main = fluid.default_main_program()
-
-            data = np.random.randn(2, 3).astype("float32")
-            exe.run(start)
-            y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
-
-            data
-            # array([[-1.1239197 ,  1.3391294 ,  0.03921051],
-            #        [ 1.1970421 ,  0.02440812,  1.2055548 ]], dtype=float32)
-            y_np
-            # array([[-0.2756806 ,  1.0610548 ,  0.01998957],
-            #        [ 0.9193261 ,  0.01235299,  0.9276883 ]], dtype=float32)
-
-        .. code-block:: python
-
-            # imperative mode
-            import numpy as np
-            from paddle import fluid
-            import paddle.fluid.dygraph as dg
-
-            data = np.random.randn(2, 3).astype("float32")
-            place = fluid.CPUPlace()
-            with dg.guard(place) as g:
-                x = dg.to_variable(data)
-                y = fluid.layers.swish(x)
-                y_np = y.numpy()
-
-            data
-            # array([[-0.0816701 ,  1.1603649 , -0.88325626],
-            #        [ 0.7522361 ,  1.0978601 ,  0.12987892]], dtype=float32)
-            y_np
-            # array([[-0.03916847,  0.8835007 , -0.25835553],
-            #        [ 0.51126915,  0.82324016,  0.06915068]], dtype=float32)
-    """
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
-    helper = LayerHelper('swish', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='swish',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={'slope': beta},
-    )
-    return out
 
 
 @deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
 def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
     r"""
......
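One behavioral note on the removed code above: `fluid.layers.swish` exposed a `beta` argument (forwarded, as the diff shows, through an op attr named 'slope'), while `paddle.nn.functional.swish` fixes beta at 1. Callers that relied on a non-default beta can compose the docstring's formula, out = x / (1 + e^(-beta * x)) = x * sigmoid(beta * x), from primitives. A sketch; `swish_with_beta` is a hypothetical helper, not a Paddle API:

    import paddle
    import paddle.nn.functional as F

    def swish_with_beta(x, beta=1.0):
        # Same math as the removed docstring: out = x / (1 + e^{-beta * x})
        return x * F.sigmoid(beta * x)

    x = paddle.to_tensor([-1.0, 0.5, 2.0])
    # With the default beta=1.0 this matches the replacement API:
    assert paddle.allclose(swish_with_beta(x), F.swish(x))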
@@ -92,8 +92,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Swish(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
-        self.act_alpha = 4
-        self.act = fluid.layers.swish
+        self.act = paddle.nn.functional.swish
 
 
 class ElementwiseActivationMkldnnFusePassTest_Add_HardSwish(
@@ -194,7 +193,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Swish(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_sub
-        self.act = fluid.layers.swish
+        self.act = paddle.nn.functional.swish
 
 
 class ElementwiseActivationMkldnnFusePassTest_Sub_HardSwish(
@@ -287,7 +286,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Swish(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_mul
-        self.act = fluid.layers.swish
+        self.act = paddle.nn.functional.swish
 
 
 class ElementwiseActivationMkldnnFusePassTest_Mul_HardSwish(
......
@@ -21,6 +21,7 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.core import PassVersionChecker
 from paddle.fluid.core import AnalysisConfig
+import paddle
 
 
 class TensorRTSubgraphPassActivationTest(InferencePassTest):
@@ -118,7 +119,7 @@ class TensorRTSubgraphPassSwishTest(TensorRTSubgraphPassActivationTest):
     )
 
     def append_act(self, x):
-        return fluid.layers.swish(x)
+        return paddle.nn.functional.swish(x)
 
 
 class TensorRTSubgraphPassSwishFp16SerializeTest(
@@ -131,7 +132,7 @@ class TensorRTSubgraphPassSwishFp16SerializeTest(
     )
 
     def append_act(self, x):
-        return fluid.layers.swish(x)
+        return paddle.nn.functional.swish(x)
 
 
 class TensorRTSubgraphPassDynamicSwishFp16SerializeTest(
@@ -152,7 +153,7 @@ class TensorRTSubgraphPassDynamicSwishFp16SerializeTest(
     )
 
     def append_act(self, x):
-        return fluid.layers.swish(x)
+        return paddle.nn.functional.swish(x)
 
 
 class TensorRTSubgraphPassMishTest(TensorRTSubgraphPassActivationTest):
......
@@ -3551,7 +3551,7 @@ class TestSwishAPI(unittest.TestCase):
         paddle.enable_static()
         with fluid.program_guard(fluid.Program()):
             x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = fluid.layers.swish(x)
+            out = paddle.nn.functional.swish(x)
             exe = fluid.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
             out_ref = ref_swish(self.x_np)
......
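The activation test above compares the migrated call against `ref_swish`, whose body sits outside this hunk. A reference of that shape is usually a plain NumPy transcription of the same formula; a hypothetical sketch, not the actual helper from the test file:

    import numpy as np

    def ref_swish(x):
        # NumPy reference: swish(x) = x * sigmoid(x), beta fixed at 1
        return x / (1.0 + np.exp(-x))

    x = np.random.randn(2, 3).astype("float32")
    print(ref_swish(x).shape)  # (2, 3): same shape and dtype as the input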