diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index e0588530e68252532684992c5e73054e38ac2fa4..5fead860a6fe55735055868008527e8fd775cc28 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -73,7 +73,6 @@ __all__ = [
     'autoincreased_step_counter',
     'unsqueeze',
     'lod_reset',
-    'relu',
     'clip',
     'clip_by_norm',
     'mul',
@@ -1826,51 +1825,6 @@ def lod_reset(x, y=None, target_lod=None):
     return out


-@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu")
-def relu(x, name=None):
-    """
-    ${comment}
-
-    Args:
-        x(Variable): ${x_comment}
-        name(str, optional): The default value is None. Normally there is no
-            need for user to set this property. For more information, please
-            refer to :ref:`api_guide_Name`.
-
-    Returns:
-        Variable: ${out_comment}
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            import numpy as np
-            in1 = np.array([[-1,0],[1,2.6]])
-            with fluid.dygraph.guard():
-                x1 = fluid.dygraph.to_variable(in1)
-                out1 = fluid.layers.relu(x1)
-                print(out1.numpy())
-                # [[0.  0. ]
-                # [1.  2.6]]"""
-
-    if in_dygraph_mode():
-        return _C_ops.relu(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.relu(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
-
-    inputs = {'X': [x]}
-    helper = LayerHelper('relu', **locals())
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out}
-    )
-    return out
-
-
 def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
     if _non_static_mode():
         op = getattr(_legacy_C_ops, op_name)
diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py
index 66427426cbbfff9821935e639ebaa129cf9a59b9..104ac8d864fb3bedd77d8f3c8d0a3adc421c4aca 100644
--- a/python/paddle/fluid/reader.py
+++ b/python/paddle/fluid/reader.py
@@ -1810,7 +1810,7 @@ class PyReader(DataLoaderBase):
                     paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
                     place)
                 for image, label in py_reader():
-                    relu = fluid.layers.relu(image)
+                    relu = paddle.nn.functional.relu(image)
         """

     def __init__(
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
index f0ca20d3df735190c7b4b37c81daf53205d5e8e0..5d08ee96566e198830d5e2981c33b467562e24d6 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
@@ -62,7 +62,7 @@ def dyfunc_with_if_else2(x, col=100):
         # col = -1
         col = fluid.layers.fill_constant(shape=[1], value=-1, dtype="int64")
     if paddle.mean(x).numpy()[0] > x.numpy()[row][col]:
-        y = fluid.layers.relu(x)
+        y = paddle.nn.functional.relu(x)
     else:
         x_pow = paddle.pow(x, 2)
         y = paddle.tanh(x_pow)
@@ -163,7 +163,7 @@ def nested_if_else(x_v):
         w = fluid.layers.fill_constant([feat_size], dtype='float32', value=10)
         if y.numpy()[0] < 10:
             tmp = y * w
-            y = fluid.layers.relu(tmp)
+            y = paddle.nn.functional.relu(tmp)
             if paddle.mean(y).numpy()[0] < batch_size:
                 y = paddle.abs(y)
             else:
@@ -273,7 +273,7 @@ class NetWithControlFlowIf(fluid.dygraph.Layer):
                 # Create new var, but is not used.
                 x = 10
                 tmp = y * self.constant_vars['w']
-                y = fluid.layers.relu(tmp)
+                y = paddle.nn.functional.relu(tmp)
                 # Nested `if/else`
                 if y.numpy()[-1] < self.alpha:
                     # Modify variable of class
diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
index 47b65f5626ff6a6a53755701527fc948612db8f2..9b3fd4644cc51c251f07d1cae7a822dc0ba8a522 100644
--- a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
@@ -35,7 +35,7 @@ class FusionGroupPassTest(PassTest):
             tmp_0 = self.feed_vars[0] * self.feed_vars[1]
             tmp_1 = layers.mul(tmp_0, self.feed_vars[2])
             # subgraph with 2 op nodes
-            tmp_2 = layers.relu(tmp_0 + tmp_1)
+            tmp_2 = paddle.nn.functional.relu(tmp_0 + tmp_1)

         self.append_gradients(tmp_2)

@@ -146,7 +146,7 @@ class FusionGroupPassTestCastAndFP16(FusionGroupPassTest):
             tmp_3 = layers.mul(tmp_0, self.feed_vars[2])
             # subgraph with 4 op nodes
             tmp_3 = layers.cast(tmp_2, dtype="float16")
-            tmp_4 = layers.relu(tmp_1 + tmp_3)
+            tmp_4 = paddle.nn.functional.relu(tmp_1 + tmp_3)
             tmp_5 = layers.cast(tmp_4, dtype=dtype)
             tmp_3 = layers.cast(tmp_2, dtype=dtype)
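Migration note (not part of the patch): every call site above moves from the deprecated fluid.layers.relu to paddle.nn.functional.relu, which computes the same elementwise max(0, x). A minimal sketch of the replacement, assuming a Paddle 2.x install, with the input values taken from the removed docstring example:

    import paddle

    x = paddle.to_tensor([[-1.0, 0.0], [1.0, 2.6]])
    out = paddle.nn.functional.relu(x)  # drop-in replacement for fluid.layers.relu(x)
    print(out.numpy())
    # [[0.  0. ]
    #  [1.  2.6]]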