diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 3d26fc1260381f38a51db03c2a8a7ba04231c09e..f8c741585081aad89eb502002344f02891942b4a 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -121,7 +121,6 @@ __all__ = [
     'pow',
     'prelu',
     'brelu',
-    'leaky_relu',
     'flatten',
     'pad2d',
     'unique',
@@ -8238,33 +8237,6 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None):
     return out
 
 
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.leaky_relu")
-@templatedoc()
-def leaky_relu(x, alpha=0.02, name=None):
-    """
-    ${comment}
-    Args:
-        x(${x_type}): ${x_comment}
-        alpha(${alpha_type}|0.02): ${alpha_comment}
-        name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
-
-    Returns:
-        output(${out_type}): ${out_comment}
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle
-
-            x = paddle.to_tensor([[-1, 2], [3, -4]], dtype='float32')
-            y = paddle.fluid.layers.leaky_relu(x, alpha=0.1)
-            print(y) # [[-0.1, 2], [3, -0.4]]
-
-    """
-    return paddle.nn.functional.leaky_relu(x, alpha, name)
-
-
 def flatten(x, axis=1, name=None):
     r"""
     **Flatten op**
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py
index b1cb22c57008d4348221be78a0eab56a324d5300..b532a368214452ce8bff76ffad7d708810ba6e42 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py
@@ -65,7 +65,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
         out = self.conv(inputs)
         out = self.batch_norm(out)
         if self.act == 'leaky':
-            out = fluid.layers.leaky_relu(x=out, alpha=0.1)
+            out = paddle.nn.functional.leaky_relu(out, 0.1)
         return out
 
 
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py
index ab79a05796de48d39404365530ab4837c4c04316..a41791eb04ef4f43028de64233bd37642ea43bdd 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py
@@ -396,7 +396,7 @@ class conv2d(fluid.dygraph.Layer):
         if self.norm:
             conv = self.bn(conv)
         if self.relu:
-            conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
+            conv = paddle.nn.functional.leaky_relu(conv, self.relufactor)
         return conv
 
 
@@ -468,7 +468,7 @@ class DeConv2D(fluid.dygraph.Layer):
         if self.norm:
             conv = self.bn(conv)
         if self.relu:
-            conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
+            conv = paddle.nn.functional.leaky_relu(conv, self.relufactor)
         return conv
 
 
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
index bcf79b82baf47a14379a0fdae09e894b3857ee34..6cb722517468dd1291cebcf54baa1aae7f164823 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
@@ -84,7 +84,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_LeakyRelu(
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
         self.act_alpha = 0.2
-        self.act = fluid.layers.leaky_relu
+        self.act = paddle.nn.functional.leaky_relu
 
 
 class ElementwiseActivationMkldnnFusePassTest_Add_Swish(
@@ -184,7 +184,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_LeakyRelu(
     def set_params(self):
         self.operand = fluid.layers.elementwise_sub
         self.act_alpha = 0.2
-        self.act = fluid.layers.leaky_relu
+        self.act = paddle.nn.functional.leaky_relu
 
 
 class ElementwiseActivationMkldnnFusePassTest_Sub_Swish(
@@ -276,7 +276,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_LeakyRelu(
     def set_params(self):
         self.operand = fluid.layers.elementwise_mul
         self.act_alpha = 0.2
-        self.act = fluid.layers.leaky_relu
+        self.act = paddle.nn.functional.leaky_relu
 
 
 class ElementwiseActivationMkldnnFusePassTest_Mul_Swish(
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
index 7f4276bff5e7d073fce7e8913cec320be422e46a..763608afdf7ac44819b2ce476c46451b469b4101 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
@@ -67,7 +67,7 @@ class TensorRTSubgraphPassActivationTest(InferencePassTest):
 
 class TensorRTSubgraphPassLeakyReluTest(TensorRTSubgraphPassActivationTest):
     def append_act(self, x):
-        return fluid.layers.leaky_relu(x)
+        return paddle.nn.functional.leaky_relu(x)
 
 
 class TensorRTSubgraphPassRelu6Test(TensorRTSubgraphPassActivationTest):
diff --git a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
index 4182d1c586df5b172f0a3ceb905acd71940ae95e..e0c40e20021119dbd01e2f43e9a52cfb1b695a3a 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
@@ -217,7 +217,7 @@ class TestLeakyReluDoubleGradCheck(unittest.TestCase):
         x = layers.data('x', shape, False, dtype)
         x.persistable = True
 
-        y = layers.leaky_relu(x, alpha=alpha)
+        y = paddle.nn.functional.leaky_relu(x, alpha)
 
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         x_arr[np.abs(x_arr) < 0.005] = 0.02
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 913777c2515f43545356dc1e9cc719006aa5a3c5..abfc0527dbde45598962e602ba7af3d6f218ca70 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -1689,7 +1689,6 @@ class TestLeakyRelu_ZeroDim(TestLeakyRelu):
 
 class TestLeakyReluAPI(unittest.TestCase):
     # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
-    # fluid.layers.leaky_relu
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
index f121bacb2a5856e9015c2aee7f36f296b0be4e70..c38caf69e086a72b1a228ab9a7ca4029cea3e5ce 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
@@ -365,7 +365,7 @@ class TestDygraphDoubleGrad(TestCase):
         x.stop_gradient = False
 
         alpha = 0.2
-        y = fluid.layers.leaky_relu(x, alpha=alpha)
+        y = paddle.nn.functional.leaky_relu(x, alpha)
         y = y * y
 
         z = y * y
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
index a75bc4b8a8e58a3342c53617c374acd94560bedd..2c8d408316b6d484dad7238558bf5afef3688111 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
@@ -163,7 +163,7 @@ class Conv2DLayer(fluid.dygraph.Layer):
             conv = self._norm(conv)
 
         if self.relufactor is not None:
-            conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
+            conv = paddle.nn.functional.leaky_relu(conv, self.relufactor)
 
         return conv
 
@@ -205,7 +205,7 @@ class Deconv2DLayer(fluid.dygraph.Layer):
             deconv = self._norm(deconv)
 
         if self.relufactor is not None:
-            deconv = fluid.layers.leaky_relu(deconv, alpha=self.relufactor)
+            deconv = paddle.nn.functional.leaky_relu(deconv, self.relufactor)
 
         return deconv
 
diff --git a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
index d02214623b7ce37d60824d4ea7f6abcc8c7de9c2..56f3c13f4f33dbfdcad1fc298ea8238093ac6b0c 100644
--- a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
@@ -78,7 +78,7 @@ class TestInplaceANBOpTraining(unittest.TestCase):
                 in_place=inplace,
             )
 
             if activation == 'leaky_relu':
-                bn = fluid.layers.leaky_relu(bn, alpha)
+                bn = paddle.nn.functional.leaky_relu(bn, alpha)
             if activation == 'elu':
                 bn = paddle.nn.functional.elu(bn, alpha)
diff --git a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py
index e6e0e50ac76d78b3f23a8ebfac2f9a2d83c3b18a..9d7bfc6888f1a8718f36982d9d69400e159b5912 100644
--- a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py
@@ -164,7 +164,7 @@ class TestDygraphDoubleGrad(TestCase):
         x.stop_gradient = False
 
         alpha = 0.2
-        y = fluid.layers.leaky_relu(x, alpha=alpha)
+        y = paddle.nn.functional.leaky_relu(x, alpha)
         y = y * y
 
         z = y * y
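
Migration note: every call site above moves from the deprecated fluid.layers.leaky_relu(x, alpha=...) to paddle.nn.functional.leaky_relu, passing the slope positionally; in the 2.x API the keyword name is negative_slope. A minimal sketch of the replacement, reusing the example values from the removed docstring:

    import paddle

    x = paddle.to_tensor([[-1, 2], [3, -4]], dtype='float32')
    # before (removed in this patch): fluid.layers.leaky_relu(x, alpha=0.1)
    y = paddle.nn.functional.leaky_relu(x, negative_slope=0.1)
    print(y)  # [[-0.1, 2], [3, -0.4]]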