From e16d9a8e57bbafd3cc3462d20c2ead6e033bcfc7 Mon Sep 17 00:00:00 2001
From: 傅剑寒
Date: Wed, 23 Nov 2022 18:41:23 +0800
Subject: [PATCH] remove_leaky_relu in nn.py under fluid (#47901)

---
 python/paddle/fluid/layers/nn.py              | 28 -------------------
 .../unittests/dygraph_to_static/darknet.py    |  2 +-
 .../dygraph_to_static/test_cycle_gan.py       |  4 +--
 .../test_mkldnn_elt_act_fuse_pass.py          |  6 ++--
 .../ir/inference/test_trt_activation_pass.py  |  2 +-
 .../unittests/test_activation_nn_grad.py      |  2 +-
 .../tests/unittests/test_activation_op.py     |  1 -
 .../unittests/test_imperative_double_grad.py  |  2 +-
 ...perative_star_gan_with_gradient_penalty.py |  4 +--
 .../tests/unittests/test_inplace_abn_op.py    |  2 +-
 .../test_paddle_imperative_double_grad.py     |  2 +-
 11 files changed, 13 insertions(+), 42 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 3d26fc12603..f8c74158508 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -121,7 +121,6 @@ __all__ = [
     'pow',
     'prelu',
     'brelu',
-    'leaky_relu',
     'flatten',
     'pad2d',
     'unique',
@@ -8238,33 +8237,6 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None):
     return out
 
 
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.leaky_relu")
-@templatedoc()
-def leaky_relu(x, alpha=0.02, name=None):
-    """
-    ${comment}
-    Args:
-        x(${x_type}): ${x_comment}
-        alpha(${alpha_type}|0.02): ${alpha_comment}
-        name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
-
-    Returns:
-        output(${out_type}): ${out_comment}
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle
-
-            x = paddle.to_tensor([[-1, 2], [3, -4]], dtype='float32')
-            y = paddle.fluid.layers.leaky_relu(x, alpha=0.1)
-            print(y) # [[-0.1, 2], [3, -0.4]]
-
-    """
-    return paddle.nn.functional.leaky_relu(x, alpha, name)
-
-
 def flatten(x, axis=1, name=None):
     r"""
     **Flatten op**
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py
index b1cb22c5700..b532a368214 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py
@@ -65,7 +65,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
         out = self.conv(inputs)
         out = self.batch_norm(out)
         if self.act == 'leaky':
-            out = fluid.layers.leaky_relu(x=out, alpha=0.1)
+            out = paddle.nn.functional.leaky_relu(out, 0.1)
         return out
 
 
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py
index ab79a05796d..a41791eb04e 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py
@@ -396,7 +396,7 @@ class conv2d(fluid.dygraph.Layer):
         if self.norm:
             conv = self.bn(conv)
         if self.relu:
-            conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
+            conv = paddle.nn.functional.leaky_relu(conv, self.relufactor)
         return conv
 
 
@@ -468,7 +468,7 @@ class DeConv2D(fluid.dygraph.Layer):
         if self.norm:
             conv = self.bn(conv)
         if self.relu:
-            conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
+            conv = paddle.nn.functional.leaky_relu(conv, self.relufactor)
         return conv
 
 
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
index bcf79b82baf..6cb72251746 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
@@ -84,7 +84,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_LeakyRelu(
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
         self.act_alpha = 0.2
-        self.act = fluid.layers.leaky_relu
+        self.act = paddle.nn.functional.leaky_relu
 
 
 class ElementwiseActivationMkldnnFusePassTest_Add_Swish(
@@ -184,7 +184,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_LeakyRelu(
     def set_params(self):
         self.operand = fluid.layers.elementwise_sub
         self.act_alpha = 0.2
-        self.act = fluid.layers.leaky_relu
+        self.act = paddle.nn.functional.leaky_relu
 
 
 class ElementwiseActivationMkldnnFusePassTest_Sub_Swish(
@@ -276,7 +276,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_LeakyRelu(
     def set_params(self):
         self.operand = fluid.layers.elementwise_mul
         self.act_alpha = 0.2
-        self.act = fluid.layers.leaky_relu
+        self.act = paddle.nn.functional.leaky_relu
 
 
 class ElementwiseActivationMkldnnFusePassTest_Mul_Swish(
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
index 7f4276bff5e..763608afdf7 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
@@ -67,7 +67,7 @@ class TensorRTSubgraphPassActivationTest(InferencePassTest):
 
 class TensorRTSubgraphPassLeakyReluTest(TensorRTSubgraphPassActivationTest):
     def append_act(self, x):
-        return fluid.layers.leaky_relu(x)
+        return paddle.nn.functional.leaky_relu(x)
 
 
 class TensorRTSubgraphPassRelu6Test(TensorRTSubgraphPassActivationTest):
diff --git a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
index 4182d1c586d..e0c40e20021 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
@@ -217,7 +217,7 @@ class TestLeakyReluDoubleGradCheck(unittest.TestCase):
 
         x = layers.data('x', shape, False, dtype)
         x.persistable = True
-        y = layers.leaky_relu(x, alpha=alpha)
+        y = paddle.nn.functional.leaky_relu(x, alpha)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         x_arr[np.abs(x_arr) < 0.005] = 0.02
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 913777c2515..abfc0527dbd 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -1689,7 +1689,6 @@ class TestLeakyRelu_ZeroDim(TestLeakyRelu):
 
 class TestLeakyReluAPI(unittest.TestCase):
     # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
-    # fluid.layers.leaky_relu
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
index f121bacb2a5..c38caf69e08 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
@@ -365,7 +365,7 @@ class TestDygraphDoubleGrad(TestCase):
         x.stop_gradient = False
 
         alpha = 0.2
-        y = fluid.layers.leaky_relu(x, alpha=alpha)
+        y = paddle.nn.functional.leaky_relu(x, alpha)
         y = y * y
         z = y * y
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
index a75bc4b8a8e..2c8d408316b 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
@@ -163,7 +163,7 @@ class Conv2DLayer(fluid.dygraph.Layer):
             conv = self._norm(conv)
 
         if self.relufactor is not None:
-            conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
+            conv = paddle.nn.functional.leaky_relu(conv, self.relufactor)
 
         return conv
@@ -205,7 +205,7 @@ class Deconv2DLayer(fluid.dygraph.Layer):
             deconv = self._norm(deconv)
 
         if self.relufactor is not None:
-            deconv = fluid.layers.leaky_relu(deconv, alpha=self.relufactor)
+            deconv = paddle.nn.functional.leaky_relu(deconv, self.relufactor)
 
         return deconv
diff --git a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
index d02214623b7..56f3c13f4f3 100644
--- a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
@@ -78,7 +78,7 @@ class TestInplaceANBOpTraining(unittest.TestCase):
                 in_place=inplace,
             )
             if activation == 'leaky_relu':
-                bn = fluid.layers.leaky_relu(bn, alpha)
+                bn = paddle.nn.functional.leaky_relu(bn, alpha)
             if activation == 'elu':
                 bn = paddle.nn.functional.elu(bn, alpha)
diff --git a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py
index e6e0e50ac76..9d7bfc6888f 100644
--- a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py
@@ -164,7 +164,7 @@ class TestDygraphDoubleGrad(TestCase):
         x.stop_gradient = False
 
         alpha = 0.2
-        y = fluid.layers.leaky_relu(x, alpha=alpha)
+        y = paddle.nn.functional.leaky_relu(x, alpha)
         y = y * y
         z = y * y
-- 
GitLab
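Migration note: the removed fluid.layers.leaky_relu(x, alpha) maps directly
onto paddle.nn.functional.leaky_relu(x, negative_slope), as the call-site
updates above show. A minimal before/after sketch (assuming paddle >= 2.0;
the tensor values are illustrative, taken from the removed docstring example):

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([[-1.0, 2.0], [3.0, -4.0]])
    # Old (removed by this patch): y = fluid.layers.leaky_relu(x, alpha=0.1)
    # New API: the slope argument is named negative_slope rather than alpha.
    y = F.leaky_relu(x, negative_slope=0.1)
    print(y)  # [[-0.1, 2.0], [3.0, -0.4]] -- negative entries scaled by 0.1

One caveat for bare calls such as the one in the TensorRT test above: the
removed wrapper defaulted alpha to 0.02, while paddle.nn.functional.leaky_relu
defaults negative_slope to 0.01, so call sites that relied on the old default
should pass the slope explicitly if they need the previous behavior.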