Unverified commit 5a45ceb2, authored by 傅剑寒, committed by GitHub

Remove fluid.layers.relu6 under fluid directory (#47876)

* remove relu6 test cases under fluid

* fix relu6 test cases in mkldnn_elt_act_fuse_pass
Parent 1175a2b9
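For downstream code, the migration this commit implies is mechanical: every call to the removed fluid.layers.relu6 maps onto paddle.nn.functional.relu6. A minimal before/after sketch (assuming Paddle 2.x eager mode; the input values are taken from the removed docstring example):

    import paddle

    x = paddle.to_tensor([[-1.0, 0.0], [2.5, 7.8]])

    # before (removed by this commit):
    #   out = fluid.layers.relu6(x, threshold=6.0)
    # after:
    out = paddle.nn.functional.relu6(x)
    print(out.numpy())
    # [[0.  0. ]
    #  [2.5 6. ]]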
@@ -14,7 +14,6 @@
 """
 All layers just related to the neural network.
 """
-import os
 import inspect
 import warnings
@@ -127,7 +126,6 @@ __all__ = [
     'log',
     'crop',
     'crop_tensor',
-    'relu6',
     'pow',
     'hard_sigmoid',
     'prelu',
@@ -9580,52 +9578,6 @@ def pad2d(
     return out
 
 
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu6")
-def relu6(x, threshold=6.0, name=None):
-    """
-    ${comment}
-
-    Args:
-        x(${x_type}): ${x_comment}
-        threshold(float, optional): ${threshold_comment}
-        name(str, optional): The default value is None. Normally there is no
-            need for user to set this property. For more information, please
-            refer to :ref:`api_guide_Name`.
-
-    Returns:
-        output(${out_type}): ${out_comment}
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            import numpy as np
-            in1 = np.array([[-1, 0], [2.5, 7.8]])
-            with fluid.dygraph.guard():
-                x1 = fluid.dygraph.to_variable(in1)
-                out1 = fluid.layers.relu6(x=x1, threshold=6.0)
-                print(out1.numpy())
-                # [[0.  0. ]
-                #  [2.5 6. ]]
-    """
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
-    helper = LayerHelper('relu6', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='relu6',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={
-            'threshold': threshold,
-            'use_mkldnn': _global_flags()["FLAGS_use_mkldnn"],
-        },
-    )
-    return out
-
-
 @templatedoc()
 def pow(x, factor=1.0, name=None):
     """
@@ -80,7 +80,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
         y = self._conv(inputs)
         y = self._batch_norm(y)
         if if_act:
-            y = fluid.layers.relu6(y)
+            y = paddle.nn.functional.relu6(y)
         return y
@@ -151,8 +151,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Relu6(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_add
-        self.act = fluid.layers.relu6
-        self.act_alpha = 5.0
+        self.act = paddle.nn.functional.relu6
 
 
 class ElementwiseActivationMkldnnFusePassTest_Add_Sigmoid(
@@ -244,8 +243,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Relu6(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_sub
-        self.act = fluid.layers.relu6
-        self.act_alpha = 5.0
+        self.act = paddle.nn.functional.relu6
 
 
 class ElementwiseActivationMkldnnFusePassTest_Sub_Sigmoid(
@@ -345,8 +343,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Relu6(
 ):
     def set_params(self):
         self.operand = fluid.layers.elementwise_mul
-        self.act = fluid.layers.relu6
-        self.act_alpha = 5.0
+        self.act = paddle.nn.functional.relu6
 
 
 class ElementwiseActivationMkldnnFusePassTest_Mul_Sigmoid(
@@ -71,7 +71,7 @@ class TensorRTSubgraphPassLeakyReluTest(TensorRTSubgraphPassActivationTest):
 class TensorRTSubgraphPassRelu6Test(TensorRTSubgraphPassActivationTest):
     def append_act(self, x):
-        return fluid.layers.relu6(x)
+        return paddle.nn.functional.relu6(x)
 
 
 class TensorRTSubgraphPassSoftMaxTest(TensorRTSubgraphPassActivationTest):
@@ -2074,7 +2074,7 @@ class TestRelu6API(unittest.TestCase):
         paddle.enable_static()
         with fluid.program_guard(fluid.Program()):
             x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = fluid.layers.relu6(x)
+            out = paddle.nn.functional.relu6(x)
             exe = fluid.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_relu6(self.x_np)
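The ref_relu6 helper used above is defined elsewhere in the test file; by the standard definition relu6(x) = min(max(x, 0), 6), a NumPy reference would look like the sketch below (the repository's actual helper may differ in detail):

    import numpy as np

    def ref_relu6(x, threshold=6.0):
        # clip activations into [0, threshold]; relu6 uses threshold=6
        return np.minimum(np.maximum(x, 0), threshold)

    x = np.array([[-1.0, 0.3], [2.5, 7.8]])
    print(ref_relu6(x))
    # [[0.  0.3]
    #  [2.5 6. ]]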