From 69eeaf03d50bcb2966aded91aca4a8110cf2333d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=82=85=E5=89=91=E5=AF=92?=
Date: Mon, 21 Nov 2022 15:17:43 +0800
Subject: [PATCH] [fluid clean] remove fluid.layers.expand_as in nn.py under
 fluid (#47931)

---
 python/paddle/fluid/layers/nn.py              | 89 -------------------
 .../unittests/auto_parallel_gpt_model.py      |  4 +-
 .../test_auto_parallel_completion_gpt.py      |  4 +-
 .../test_auto_parallel_partitioner_gpt.py     |  4 +-
 .../tests/unittests/test_expand_as_op.py      | 44 ---------
 5 files changed, 3 insertions(+), 142 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index e9ca037e49..d9253f50a1 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -135,7 +135,6 @@ __all__ = [
     'unique',
     'unique_with_counts',
     'expand',
-    'expand_as',
     'scale',
     'elementwise_add',
     'elementwise_div',
@@ -9969,94 +9968,6 @@ def expand(x, expand_times, name=None):
     return out
 
 
-@deprecated(since='2.0.0', update_to="paddle.expand_as")
-def expand_as(x, target_tensor, name=None):
-    """
-    :alias_main: paddle.expand_as
-    :alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as
-    :old_api: paddle.fluid.layers.expand_as
-
-    expand_as operator tiles to the input by given expand tensor. You should set expand tensor
-    for each dimension by providing tensor 'target_tensor'. The rank of X
-    should be in [1, 6]. Please note that size of 'target_tensor' must be the same
-    with X's rank. Following is a using case:
-
-
-    .. code-block:: text
-
-        Input(X) is a 3-D tensor with shape [2, 3, 1]:
-
-                [
-                   [[1], [2], [3]],
-                   [[4], [5], [6]]
-                ]
-
-        target_tensor's shape:  [2, 6, 2]
-
-        Output(Out) is a 3-D tensor with shape [2, 6, 2]:
-
-                [
-                    [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
-                    [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
-                ]
-
-
-    Args:
-        x (Variable): A Tensor with dtype float64, float32, int32.
-            A tensor with rank in [1, 6].
-        target_tensor (Variable): A Tensor with dtype float64, float32, int32.
-            target_tensor for expanding to Input(X). Only use target_tensor'shape.
-
-    Returns:
-        Variable: A Tensor with dtype float64, float32, int32.
-            After expanding, size of each dimension of Output(Out) is equal to the size
-            of the corresponding dimension of target_tensor multiplying the corresponding
-            value given by target_tensor.
-
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-            import numpy as np
-            paddle.enable_static()
-
-            data = fluid.layers.data(name="data", shape=[-1,10], dtype='float64')
-            target_tensor = fluid.layers.data(
-                name="target_tensor", shape=[-1,20], dtype='float64')
-            result = fluid.layers.expand_as(x=data, target_tensor=target_tensor)
-            use_cuda = False
-            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
-            x = np.random.rand(3,10)
-            y = np.random.rand(3,20)
-            output= exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name])
-            print(output[0].shape)
-            #(3,20)
-
-    """
-    if _non_static_mode():
-        return _legacy_C_ops.expand_as(x, target_tensor)
-
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as'
-    )
-    check_variable_and_dtype(
-        target_tensor,
-        'target_tensor',
-        ['float32', 'float64', 'int32', 'int64', 'bool'],
-        'expand_as',
-    )
-    helper = LayerHelper('expand_as', input=x, **locals())
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    inputs = {'X': x, 'target_tensor': target_tensor}
-    helper.append_op(type='expand_as', inputs=inputs, outputs={'Out': out})
-    return out
-
-
 from paddle.fluid.framework import convert_np_dtype_to_dtype_
diff --git a/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py b/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py
index 829e7f7a5d..425f00d121 100644
--- a/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py
+++ b/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py
@@ -724,9 +724,7 @@ class GPTModel(nn.Layer):
                 dtype='int64',
             )
             position_ids = position_ids.unsqueeze(0)
-            position_ids = paddle.fluid.layers.expand_as(
-                position_ids, input_ids
-            )
+            position_ids = paddle.expand_as(position_ids, input_ids)
         embedding_output = self.embeddings(
             input_ids=input_ids, position_ids=position_ids
         )
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py
index 75af22f291..0febac998b 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py
@@ -616,9 +616,7 @@ class GPTModel(nn.Layer):
             )
             position_ids = position_ids.unsqueeze(0)
             # .expand_as(input_ids)
-            position_ids = paddle.fluid.layers.expand_as(
-                position_ids, input_ids
-            )
+            position_ids = paddle.expand_as(position_ids, input_ids)
         embedding_output = self.embeddings(
             input_ids=input_ids, position_ids=position_ids
         )
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py
index 92528009bd..b65a235124 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py
@@ -662,9 +662,7 @@ class GPTModel(nn.Layer):
             )
             position_ids = position_ids.unsqueeze(0)
             # .expand_as(input_ids)
-            position_ids = paddle.fluid.layers.expand_as(
-                position_ids, input_ids
-            )
+            position_ids = paddle.expand_as(position_ids, input_ids)
         embedding_output = self.embeddings(
             input_ids=input_ids, position_ids=position_ids
         )
diff --git a/python/paddle/fluid/tests/unittests/test_expand_as_op.py b/python/paddle/fluid/tests/unittests/test_expand_as_op.py
index 827f7a73a7..868f0d269e 100755
--- a/python/paddle/fluid/tests/unittests/test_expand_as_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_as_op.py
@@ -15,7 +15,6 @@
 import unittest
 import numpy as np
 from op_test import OpTest
-import paddle.fluid as fluid
 
 
 def bcast(x, target_tensor):
@@ -100,48 +99,5 @@ class TestExpandAsOpRank4(OpTest):
         self.check_grad(['X'], 'Out')
 
 
-# Test dygraph API
-class TestExpandAsDygraphAPI(unittest.TestCase):
-    def test_api(self):
-        import paddle
-
-        paddle.disable_static()
-        np_data_x = np.array([1, 2, 3]).astype('int32')
-        np_data_y = np.array([1, 2, 3, 1, 2, 3]).astype('int32')
-        data_x = paddle.to_tensor(np_data_x)
-        data_y = paddle.to_tensor(np_data_y)
-        out = fluid.layers.expand_as(data_x, data_y)
-        np_out = out.numpy()
-        assert np.array_equal(np_out, np.tile(np_data_x, (2)))
-        paddle.enable_static()
-
-
-# Test python API
-class TestExpandAsAPI(unittest.TestCase):
-    def test_api(self):
-        input1 = np.random.random([12, 14]).astype("float32")
-        input2 = np.random.random([48, 14]).astype("float32")
-        x = fluid.layers.data(
-            name='x', shape=[12, 14], append_batch_size=False, dtype="float32"
-        )
-
-        y = fluid.layers.data(
-            name='target_tensor',
-            shape=[48, 14],
-            append_batch_size=False,
-            dtype="float32",
-        )
-
-        out_1 = fluid.layers.expand_as(x, target_tensor=y)
-
-        exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1 = exe.run(
-            fluid.default_main_program(),
-            feed={"x": input1, "target_tensor": input2},
-            fetch_list=[out_1],
-        )
-        assert np.array_equal(res_1[0], np.tile(input1, (4, 1)))
-
-
 if __name__ == "__main__":
     unittest.main()
--
GitLab
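
Migration note (not part of the patch): call sites move from
fluid.layers.expand_as(x, target_tensor=y) to paddle.expand_as(x, y).
Below is a minimal dygraph sketch of the replacement call, mirroring the
GPT call sites above; the shapes and variable values are illustrative
assumptions, not taken from the patch.

.. code-block:: python

    import paddle

    # position_ids carries a singleton batch dimension, as in the GPT
    # models above: [1, seq_len] is broadcast up to input_ids' shape
    # [batch, seq_len]. batch=8 and seq_len=128 are assumed values.
    input_ids = paddle.zeros([8, 128], dtype='int64')
    position_ids = paddle.arange(128, dtype='int64').unsqueeze(0)

    # Old (removed): paddle.fluid.layers.expand_as(position_ids, input_ids)
    position_ids = paddle.expand_as(position_ids, input_ids)
    print(position_ids.shape)  # [8, 128]

One behavioral caveat: paddle.expand_as follows broadcasting rules and only
expands dimensions of size 1, whereas the removed fluid operator tiled the
input (the deleted tests assert against np.tile). Call sites that relied on
tiling need paddle.tile rather than paddle.expand_as.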