Unverified commit 1175a2b9, authored by Vvsmile and committed by GitHub

Remove API: selu (#47969)

Replace paddle.fluid.layers.selu with paddle.nn.functional.selu.
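As a quick reference for downstream users, a minimal migration sketch (not part of this commit, assuming Paddle >= 2.0):

```python
# Migration sketch: the removed fluid.layers.selu(x, scale, alpha)
# maps directly onto paddle.nn.functional.selu(x, scale, alpha).
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])
out = F.selu(x)  # defaults: scale ~= 1.0507, alpha ~= 1.6733
print(out)       # positive entries are scaled by ~1.0507
```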
Parent: 844ab6fe
@@ -124,7 +124,6 @@ __all__ = [
     'random_crop',
     'mean_iou',
     'relu',
-    'selu',
     'log',
     'crop',
     'crop_tensor',
@@ -9074,78 +9073,6 @@ def relu(x, name=None):
     return out
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.selu")
-def selu(x, scale=None, alpha=None, name=None):
-    r"""
-    Selu Operator.
-
-    The equation is:
-
-    .. math::
-
-        selu = \\lambda *
-        \\begin{cases}
-            x &\\quad \\text{ if } x > 0 \\\\
-            \\alpha * e^x - \\alpha &\\quad \\text{ if } x <= 0
-        \\end{cases}
-
-    The input `X` can carry the LoD (Level of Details) information,
-    or not. And the output shares the LoD information with input `X`.
-
-    Args:
-        x (Variable): The input N-D Tensor.
-        scale (float, optional): lambda in the selu activation function,
-            the default value is 1.0507009873554804934193349852946.
-            For more information about this value, please refer
-            to: https://arxiv.org/abs/1706.02515.
-        alpha (float, optional): alpha in the selu activation function,
-            the default value is 1.6732632423543772848170429916717.
-            For more information about this value, please refer
-            to: https://arxiv.org/abs/1706.02515.
-        name (str, optional): The default value is None. Normally there is
-            no need for user to set this property. For more information,
-            please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        Variable(Tensor|LoDTensor): The output Tensor or LoDTensor with the
-            same shape and LoD information as the input.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-            import numpy as np
-
-            paddle.enable_static()
-            inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
-            output = fluid.layers.selu(inputs)
-
-            exe = fluid.Executor(fluid.CPUPlace())
-            exe.run(fluid.default_startup_program())
-
-            img = np.array([[0, 1], [2, 3]]).astype(np.float32)
-            res = exe.run(fluid.default_main_program(),
-                          feed={'x': img},
-                          fetch_list=[output])
-            print(res)  # [array([[0., 1.050701], [2.101402, 3.152103]], dtype=float32)]
-    """
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'selu')
-    helper = LayerHelper('selu', **locals())
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    attrs = {}
-    if scale is not None:
-        attrs["scale"] = scale
-    if alpha is not None:
-        attrs["alpha"] = alpha
-    helper.append_op(
-        type="selu", inputs={"X": x}, outputs={"Out": out}, attrs=attrs
-    )
-    return out
 def mean_iou(input, label, num_classes):
     r"""
     Mean Intersection-Over-Union is a common evaluation metric for
...
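The docstring example removed above was written against the static-graph fluid API. For reference, an equivalent sketch using paddle.static together with paddle.nn.functional.selu (assuming Paddle >= 2.0) might look like:

```python
# Static-graph equivalent of the removed docstring example,
# rewritten against paddle.static and paddle.nn.functional.selu.
import numpy as np
import paddle
import paddle.nn.functional as F

paddle.enable_static()
main = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data(name="x", shape=[2, 2], dtype="float32")
    out = F.selu(x)  # default scale/alpha from the SELU paper

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup)
img = np.array([[0, 1], [2, 3]]).astype(np.float32)
res = exe.run(main, feed={"x": img}, fetch_list=[out])
print(res)  # [array([[0., 1.050701], [2.101402, 3.152103]], dtype=float32)]
```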
@@ -118,7 +118,7 @@ class TestSeluAPI(unittest.TestCase):
     def test_fluid_api(self):
         with fluid.program_guard(fluid.Program()):
             x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = fluid.layers.selu(x, self.scale, self.alpha)
+            out = F.selu(x, self.scale, self.alpha)
             exe = fluid.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
             out_ref = ref_selu(self.x_np, self.scale, self.alpha)
...
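The updated test calls F.selu and checks it against a ref_selu helper defined elsewhere in the test file. A plausible NumPy sketch of such a reference, assuming the standard SELU definition from the paper, is:

```python
import numpy as np

# Hypothetical NumPy reference for SELU, mirroring what a ref_selu
# helper would compute: scale * x for x > 0, and
# scale * alpha * (exp(x) - 1) otherwise.
def ref_selu(x, scale=1.0507009873554805, alpha=1.6732632423543772):
    x = np.asarray(x, dtype=np.float64)
    return np.where(x > 0, scale * x, scale * alpha * (np.exp(x) - 1.0))
```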