Unverified commit 676e4b35, authored by kangguangli, committed by GitHub

remove api `fluid.layers.relu` and its references in sample code and unit tests (#49097)

Parent: effd51c7
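Migration note: the replacement at every call site touched below is `paddle.nn.functional.relu`, which works in both dygraph and static-graph modes. A minimal dygraph sketch of the swap, assuming a Paddle 2.x install (this snippet is illustrative and not part of the commit):

```python
import numpy as np
import paddle
import paddle.nn.functional as F

# Same input as the docstring example of the removed fluid.layers.relu.
x = paddle.to_tensor(np.array([[-1.0, 0.0], [1.0, 2.6]], dtype="float32"))

# Element-wise max(0, x), replacing fluid.layers.relu(x).
out = F.relu(x)
print(out.numpy())
# [[0.  0. ]
#  [1.  2.6]]
```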
@@ -73,7 +73,6 @@ __all__ = [
     'autoincreased_step_counter',
     'unsqueeze',
     'lod_reset',
-    'relu',
     'clip',
     'clip_by_norm',
     'mul',
@@ -1826,51 +1825,6 @@ def lod_reset(x, y=None, target_lod=None):
     return out


-@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu")
-def relu(x, name=None):
-    """
-    ${comment}
-
-    Args:
-        x(Variable): ${x_comment}
-        name(str, optional): The default value is None. Normally there is no
-            need for user to set this property. For more information, please
-            refer to :ref:`api_guide_Name`.
-
-    Returns:
-        Variable: ${out_comment}
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            import numpy as np
-
-            in1 = np.array([[-1,0],[1,2.6]])
-            with fluid.dygraph.guard():
-                x1 = fluid.dygraph.to_variable(in1)
-                out1 = fluid.layers.relu(x1)
-                print(out1.numpy())
-                # [[0. 0. ]
-                #  [1. 2.6]]
-    """
-
-    if in_dygraph_mode():
-        return _C_ops.relu(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.relu(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
-
-    inputs = {'X': [x]}
-    helper = LayerHelper('relu', **locals())
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out}
-    )
-    return out
-
-
 def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
     if _non_static_mode():
         op = getattr(_legacy_C_ops, op_name)
......
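The deleted implementation dispatched by hand between the new dygraph path (`_C_ops.relu`), the legacy dygraph path (`_legacy_C_ops.relu`), and a static-graph path that appended a `relu` op through `LayerHelper`. `paddle.nn.functional.relu` covers all of these; below is a hedged static-graph sketch under Paddle 2.x (the program and variable names are illustrative, not from the commit):

```python
import numpy as np
import paddle
import paddle.nn.functional as F

paddle.enable_static()

# Build a static-graph program; F.relu appends the same "relu" op that
# the removed helper created via LayerHelper.append_op.
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="x", shape=[2, 2], dtype="float32")
    out = F.relu(x)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
feed_x = np.array([[-1.0, 0.0], [1.0, 2.6]], dtype="float32")
(result,) = exe.run(main_prog, feed={"x": feed_x}, fetch_list=[out])
print(result)
# [[0.  0. ]
#  [1.  2.6]]
```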
@@ -1810,7 +1810,7 @@ class PyReader(DataLoaderBase):
                 paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
                 place)
             for image, label in py_reader():
-                relu = fluid.layers.relu(image)
+                relu = paddle.nn.functional.relu(image)
     """

     def __init__(
......
@@ -62,7 +62,7 @@ def dyfunc_with_if_else2(x, col=100):
         # col = -1
         col = fluid.layers.fill_constant(shape=[1], value=-1, dtype="int64")
     if paddle.mean(x).numpy()[0] > x.numpy()[row][col]:
-        y = fluid.layers.relu(x)
+        y = paddle.nn.functional.relu(x)
     else:
         x_pow = paddle.pow(x, 2)
         y = paddle.tanh(x_pow)
@@ -163,7 +163,7 @@ def nested_if_else(x_v):
         w = fluid.layers.fill_constant([feat_size], dtype='float32', value=10)
         if y.numpy()[0] < 10:
             tmp = y * w
-            y = fluid.layers.relu(tmp)
+            y = paddle.nn.functional.relu(tmp)
             if paddle.mean(y).numpy()[0] < batch_size:
                 y = paddle.abs(y)
             else:
@@ -273,7 +273,7 @@ class NetWithControlFlowIf(fluid.dygraph.Layer):
                 # Create new var, but is not used.
                 x = 10
             tmp = y * self.constant_vars['w']
-            y = fluid.layers.relu(tmp)
+            y = paddle.nn.functional.relu(tmp)
             # Nested `if/else`
             if y.numpy()[-1] < self.alpha:
                 # Modify variable of class
......
@@ -35,7 +35,7 @@ class FusionGroupPassTest(PassTest):
             tmp_0 = self.feed_vars[0] * self.feed_vars[1]
             tmp_1 = layers.mul(tmp_0, self.feed_vars[2])
             # subgraph with 2 op nodes
-            tmp_2 = layers.relu(tmp_0 + tmp_1)
+            tmp_2 = paddle.nn.functional.relu(tmp_0 + tmp_1)

         self.append_gradients(tmp_2)
@@ -146,7 +146,7 @@ class FusionGroupPassTestCastAndFP16(FusionGroupPassTest):
             tmp_3 = layers.mul(tmp_0, self.feed_vars[2])
             # subgraph with 4 op nodes
             tmp_3 = layers.cast(tmp_2, dtype="float16")
-            tmp_4 = layers.relu(tmp_1 + tmp_3)
+            tmp_4 = paddle.nn.functional.relu(tmp_1 + tmp_3)
             tmp_5 = layers.cast(tmp_4, dtype=dtype)
             tmp_3 = layers.cast(tmp_2, dtype=dtype)
......