diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 3e8479acfb995cdc08ba850c9bf96c53fec0ed35..8f873b9ff5bdd9d02753e91af19f507da339c526 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -14,7 +14,6 @@
 """
 All layers just related to the neural network.
 """
-
 import os
 import inspect
 import warnings
@@ -127,7 +126,6 @@ __all__ = [
     'log',
     'crop',
     'crop_tensor',
-    'relu6',
     'pow',
     'hard_sigmoid',
     'prelu',
@@ -9580,52 +9578,6 @@ def pad2d(
     return out
 
 
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu6")
-def relu6(x, threshold=6.0, name=None):
-    """
-
-    ${comment}
-
-    Args:
-        x(${x_type}): ${x_comment}
-        threshold(float, optional): ${threshold_comment}
-        name(str, optional): The default value is None. Normally there is no
-            need for user to set this property. For more information, please
-            refer to :ref:`api_guide_Name`.
-
-    Returns:
-        output(${out_type}): ${out_comment}
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            import numpy as np
-            in1 = np.array([[-1,0],[2.5,7.8]])
-            with fluid.dygraph.guard():
-                x1 = fluid.dygraph.to_variable(in1)
-                out1 = fluid.layers.relu6(x=x1, threshold=6.0)
-                print(out1.numpy())
-                # [[0.  0. ]
-                #  [2.5 6. ]]
-    """
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
-
-    helper = LayerHelper('relu6', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='relu6',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={
-            'threshold': threshold,
-            'use_mkldnn': _global_flags()["FLAGS_use_mkldnn"],
-        },
-    )
-    return out
-
-
 @templatedoc()
 def pow(x, factor=1.0, name=None):
     """
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py
index 083345d9db99a4fbdfae516b9a41fb0660b86ec7..af50300be3a1cbf6027461b869c0dd153f88b060 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py
@@ -80,7 +80,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
         y = self._conv(inputs)
         y = self._batch_norm(y)
         if if_act:
-            y = fluid.layers.relu6(y)
+            y = paddle.nn.functional.relu6(y)
         return y
 
 
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
index b926a8f71a9d50b63460bb0c6c5e8b4522bdd340..3c2365c29e60cbb05ca0c69838d19102292ab370 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
@@ -151,8 +151,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Relu6(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
-        self.act = fluid.layers.relu6
-        self.act_alpha = 5.0
+        self.act = paddle.nn.functional.relu6
 
 
 class ElementwiseActivationMkldnnFusePassTest_Add_Sigmoid(
@@ -244,8 +243,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Relu6(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_sub
-        self.act = fluid.layers.relu6
-        self.act_alpha = 5.0
+        self.act = paddle.nn.functional.relu6
 
 
 class ElementwiseActivationMkldnnFusePassTest_Sub_Sigmoid(
@@ -345,8 +343,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Relu6(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_mul
-        self.act = fluid.layers.relu6
-        self.act_alpha = 5.0
+        self.act = paddle.nn.functional.relu6
 
 
 class ElementwiseActivationMkldnnFusePassTest_Mul_Sigmoid(
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
index 80e51dbf26124a4bc42873c5cf0366efc29b09ae..9b6e3e641037d1c237306c38813c3047e84b0f6e 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
@@ -71,7 +71,7 @@ class TensorRTSubgraphPassLeakyReluTest(TensorRTSubgraphPassActivationTest):
 
 class TensorRTSubgraphPassRelu6Test(TensorRTSubgraphPassActivationTest):
     def append_act(self, x):
-        return fluid.layers.relu6(x)
+        return paddle.nn.functional.relu6(x)
 
 
 class TensorRTSubgraphPassSoftMaxTest(TensorRTSubgraphPassActivationTest):
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index f460ed58c1d9ccfc02d782c4078b164e3db76628..ddd287a5803189e76d778f8fedfc1a238800645a 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -2074,7 +2074,7 @@ class TestRelu6API(unittest.TestCase):
         paddle.enable_static()
         with fluid.program_guard(fluid.Program()):
             x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = fluid.layers.relu6(x)
+            out = paddle.nn.functional.relu6(x)
             exe = fluid.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
             out_ref = ref_relu6(self.x_np)
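
A minimal migration sketch (not part of the diff above), assuming Paddle 2.x eager mode. Note that unlike the removed fluid.layers.relu6, paddle.nn.functional.relu6 takes no threshold argument and always clips at 6.0 — which is why the fuse-pass tests above also drop self.act_alpha. The input values are taken from the removed docstring example.

    # Hedged sketch: replacement usage for the removed fluid.layers.relu6.
    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([[-1.0, 0.0], [2.5, 7.8]])
    out = F.relu6(x)  # computes min(max(0, x), 6)
    print(out.numpy())
    # [[0.  0. ]
    #  [2.5 6. ]]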