diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 71f5702a7cc9a7a40ccad91101cf1a182897d103..95126e9f239d1c8d901ef5b24384506bad58eb93 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -115,7 +115,6 @@ __all__ = [
     'log',
     'crop_tensor',
     'prelu',
-    'brelu',
     'flatten',
     'pad2d',
     'unique',
@@ -7831,52 +7830,6 @@ def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
     return out


-@templatedoc()
-def brelu(x, t_min=0.0, t_max=24.0, name=None):
-    """
-    ${comment}
-    Args:
-        x(${x_type}): ${x_comment}
-        t_min(${t_min_type}|0.0): ${t_min_comment}
-        t_max(${t_max_type}|24.0): ${t_max_comment}
-        name(str|None): The default value is None. Normally there is no need for user to set this property.
-                        For more information, please refer to :ref:`api_guide_Name`.
-    Returns:
-        ${out_type}: ${out_comment}
-
-    Examples:
-
-    .. code-block:: python
-
-        import paddle.fluid as fluid
-        import paddle
-        import numpy as np
-        paddle.enable_static()
-
-        input_brelu = np.array([[-1,6],[1,15.6]])
-        with fluid.dygraph.guard():
-            x = fluid.dygraph.to_variable(input_brelu)
-            y = fluid.layers.brelu(x, t_min=1.0, t_max=10.0)
-            print(y.numpy())
-            #[[ 1. 6.]
-            #[ 1. 10.]]
-    """
-    if _non_static_mode():
-        return _legacy_C_ops.brelu(x, 't_min', t_min, 't_max', t_max)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'brelu')
-
-    helper = LayerHelper('brelu', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='brelu',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={'t_min': t_min, 't_max': t_max},
-    )
-    return out
-
-
 def flatten(x, axis=1, name=None):
     r"""
     **Flatten op**
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_activation_ops_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_activation_ops_ipu.py
index 672195469d2b23555af9c836442d69a63c2da943..3a16a06ea1b81aacd47a89bd50abc9f948f67d85 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_activation_ops_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_activation_ops_ipu.py
@@ -63,7 +63,7 @@ class TestBase(IPUOpTest):
         self.check()


-class TestBReluCase0(TestBase):
+class TestHardTanhCase0(TestBase):
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 10, 10]) * 30
         self.feed_fp32 = {'in_0': data.astype(np.float32)}
@@ -71,14 +71,14 @@ class TestBReluCase0(TestBase):
         self.feed_list = list(self.feed_fp32.keys())

     def set_test_op(self):
-        self.op = paddle.fluid.layers.brelu
+        self.op = paddle.nn.functional.hardtanh
         self.op_attrs = {}


-class TestBReluCase1(TestBReluCase0):
+class TestHardTanhCase1(TestHardTanhCase0):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.brelu
-        self.op_attrs = {"t_min": 0.1, 't_max': 10.0}
+        self.op = paddle.nn.functional.hardtanh
+        self.op_attrs = {"min": 0.1, 'max': 10.0}


 class TestEluCase1(TestBase):
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 39a866434afd67c830c37020cec89a3aa9fb2913..6cfe72bfdfd395649ccbf193be98131f7305d780 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -1891,51 +1891,6 @@ class TestBRelu(TestActivation):
         self.check_grad(['X'], 'Out')


-class TestBreluAPI(unittest.TestCase):
-    # test paddle.fluid.layers.brelu
-    def setUp(self):
-        np.random.seed(1024)
-        self.t_min = 0.0
-        self.t_max = 24.0
-        self.x_np = np.random.uniform(-1, 30, [10, 12]).astype('float32')
-        self.out_ref = np.copy(self.x_np)
-        self.out_ref[self.out_ref < self.t_min] = self.t_min
-        self.out_ref[self.out_ref > self.t_max] = self.t_max
-        self.out_ref = self.out_ref.astype('float32')
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
-
-    def test_fluid_api(self):
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.static.data('X', [10, 12])
-            out = paddle.fluid.layers.brelu(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            np.testing.assert_allclose(self.out_ref, res[0], rtol=1e-05)
-
-        paddle.disable_static(self.place)
-        x = paddle.to_tensor(self.x_np)
-        out = paddle.fluid.layers.brelu(x)
-        np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
-        paddle.enable_static()
-
-    def test_errors(self):
-        with program_guard(Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, fluid.layers.brelu, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
-            # support the input dtype is float16
-            x_fp16 = fluid.layers.data(
-                name='x_fp16', shape=[12, 10], dtype='float16'
-            )
-            fluid.layers.brelu(x_fp16)
-
-
 def ref_relu6(x, threshold=6.0):
     out = np.copy(x)
     out[np.abs(x - threshold) < 0.005] = threshold + 0.02
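
Migration note for downstream users of the removed API: fluid.layers.brelu(x, t_min, t_max)
clamped its input to the range [t_min, t_max]. As the updated IPU test suggests,
paddle.nn.functional.hardtanh provides the same clamping through its min/max arguments.
A minimal replacement sketch, assuming a Paddle build where paddle.nn.functional.hardtanh
is available (the input values below are illustrative):

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor(np.array([[-1.0, 6.0], [1.0, 15.6]], dtype='float32'))

    # Before (removed by this patch):
    #   y = paddle.fluid.layers.brelu(x, t_min=1.0, t_max=10.0)
    # After: hardtanh clamps x to [min, max], matching the old brelu semantics.
    y = F.hardtanh(x, min=1.0, max=10.0)
    print(y.numpy())
    # [[ 1.  6.]
    #  [ 1. 10.]]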