Unverified commit 69eeaf03, authored by 傅剑寒 and committed by GitHub

[fluid clean] remove fluid.layers.expand_as in nn.py under fluid (#47931)

Parent d92daae2
@@ -135,7 +135,6 @@ __all__ = [
     'unique',
     'unique_with_counts',
     'expand',
-    'expand_as',
     'scale',
     'elementwise_add',
     'elementwise_div',
@@ -9969,94 +9968,6 @@ def expand(x, expand_times, name=None):
     return out
-@deprecated(since='2.0.0', update_to="paddle.expand_as")
-def expand_as(x, target_tensor, name=None):
-    """
-    :alias_main: paddle.expand_as
-    :alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as
-    :old_api: paddle.fluid.layers.expand_as
-
-    The expand_as operator tiles the input to match the given target tensor. The
-    target shape for each dimension is taken from the tensor 'target_tensor'. The
-    rank of X should be in [1, 6]. Please note that the rank of 'target_tensor'
-    must be the same as the rank of X. The following is a usage example:
-
-    .. code-block:: text
-
-        Input(X) is a 3-D tensor with shape [2, 3, 1]:
-            [
-                [[1], [2], [3]],
-                [[4], [5], [6]]
-            ]
-        target_tensor's shape: [2, 6, 2]
-        Output(Out) is a 3-D tensor with shape [2, 6, 2]:
-            [
-                [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
-                [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
-            ]
-
-    Args:
-        x (Variable): A Tensor with dtype float64, float32, int32.
-            A tensor with rank in [1, 6].
-        target_tensor (Variable): A Tensor with dtype float64, float32, int32.
-            The target tensor that Input(X) is expanded to match; only its shape is used.
-
-    Returns:
-        Variable: A Tensor with dtype float64, float32, int32.
-            After expanding, the size of each dimension of Output(Out) is equal to
-            the size of the corresponding dimension of target_tensor.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-            import numpy as np
-
-            paddle.enable_static()
-
-            data = fluid.layers.data(name="data", shape=[-1, 10], dtype='float64')
-            target_tensor = fluid.layers.data(
-                name="target_tensor", shape=[-1, 20], dtype='float64')
-            result = fluid.layers.expand_as(x=data, target_tensor=target_tensor)
-            use_cuda = False
-            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
-            x = np.random.rand(3, 10)
-            y = np.random.rand(3, 20)
-            output = exe.run(feed={"data": x, "target_tensor": y}, fetch_list=[result.name])
-            print(output[0].shape)
-            # (3, 20)
-    """
-    if _non_static_mode():
-        return _legacy_C_ops.expand_as(x, target_tensor)
-
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as'
-    )
-    check_variable_and_dtype(
-        target_tensor,
-        'target_tensor',
-        ['float32', 'float64', 'int32', 'int64', 'bool'],
-        'expand_as',
-    )
-    helper = LayerHelper('expand_as', input=x, **locals())
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    inputs = {'X': x, 'target_tensor': target_tensor}
-    helper.append_op(type='expand_as', inputs=inputs, outputs={'Out': out})
-    return out
 from paddle.fluid.framework import convert_np_dtype_to_dtype_
......
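For reference, a minimal sketch (not part of this commit) of the replacement API, paddle.expand_as, run in dynamic graph mode. The shapes below are illustrative; paddle.expand_as broadcasts, so it only expands dimensions of size 1, whereas the removed fluid op tiled its input.

import paddle

# paddle.expand_as broadcasts `x` to the shape of `y`; only size-1
# dimensions of `x` are expanded.
x = paddle.to_tensor([[1, 2, 3]], dtype='int32')  # shape [1, 3]
y = paddle.zeros([2, 3], dtype='int32')           # shape [2, 3]
out = paddle.expand_as(x, y)                      # shape [2, 3]
print(out.numpy())
# [[1 2 3]
#  [1 2 3]]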
@@ -724,9 +724,7 @@ class GPTModel(nn.Layer):
                 dtype='int64',
             )
             position_ids = position_ids.unsqueeze(0)
-            position_ids = paddle.fluid.layers.expand_as(
-                position_ids, input_ids
-            )
+            position_ids = paddle.expand_as(position_ids, input_ids)
         embedding_output = self.embeddings(
             input_ids=input_ids, position_ids=position_ids
         )
......
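This hunk and the two GPTModel hunks that follow make the same one-line switch. A sketch of why the broadcasting API is a safe drop-in at these call sites, with made-up batch and sequence sizes: after unsqueeze(0) the position ids have shape [1, seq_len], so expanding them to input_ids of shape [batch_size, seq_len] only touches the leading size-1 dimension.

import paddle

batch_size, seq_len = 4, 16  # hypothetical sizes for illustration
input_ids = paddle.zeros([batch_size, seq_len], dtype='int64')

position_ids = paddle.arange(0, seq_len, dtype='int64')   # shape [seq_len]
position_ids = position_ids.unsqueeze(0)                   # shape [1, seq_len]
# Only the leading size-1 dimension is expanded, so the broadcasting
# paddle.expand_as reproduces the old tiling result at this call site.
position_ids = paddle.expand_as(position_ids, input_ids)   # shape [batch_size, seq_len]
print(position_ids.shape)  # [4, 16]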
@@ -616,9 +616,7 @@ class GPTModel(nn.Layer):
             )
             position_ids = position_ids.unsqueeze(0)
             # .expand_as(input_ids)
-            position_ids = paddle.fluid.layers.expand_as(
-                position_ids, input_ids
-            )
+            position_ids = paddle.expand_as(position_ids, input_ids)
         embedding_output = self.embeddings(
             input_ids=input_ids, position_ids=position_ids
         )
......
@@ -662,9 +662,7 @@ class GPTModel(nn.Layer):
             )
             position_ids = position_ids.unsqueeze(0)
             # .expand_as(input_ids)
-            position_ids = paddle.fluid.layers.expand_as(
-                position_ids, input_ids
-            )
+            position_ids = paddle.expand_as(position_ids, input_ids)
         embedding_output = self.embeddings(
             input_ids=input_ids, position_ids=position_ids
         )
......
@@ -15,7 +15,6 @@
 import unittest
 import numpy as np
 from op_test import OpTest
-import paddle.fluid as fluid


 def bcast(x, target_tensor):
@@ -100,48 +99,5 @@ class TestExpandAsOpRank4(OpTest):
         self.check_grad(['X'], 'Out')
-# Test dygraph API
-class TestExpandAsDygraphAPI(unittest.TestCase):
-    def test_api(self):
-        import paddle
-
-        paddle.disable_static()
-        np_data_x = np.array([1, 2, 3]).astype('int32')
-        np_data_y = np.array([1, 2, 3, 1, 2, 3]).astype('int32')
-        data_x = paddle.to_tensor(np_data_x)
-        data_y = paddle.to_tensor(np_data_y)
-        out = fluid.layers.expand_as(data_x, data_y)
-        np_out = out.numpy()
-        assert np.array_equal(np_out, np.tile(np_data_x, (2)))
-        paddle.enable_static()
-
-
-# Test python API
-class TestExpandAsAPI(unittest.TestCase):
-    def test_api(self):
-        input1 = np.random.random([12, 14]).astype("float32")
-        input2 = np.random.random([48, 14]).astype("float32")
-        x = fluid.layers.data(
-            name='x', shape=[12, 14], append_batch_size=False, dtype="float32"
-        )
-        y = fluid.layers.data(
-            name='target_tensor',
-            shape=[48, 14],
-            append_batch_size=False,
-            dtype="float32",
-        )
-        out_1 = fluid.layers.expand_as(x, target_tensor=y)
-        exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1 = exe.run(
-            fluid.default_main_program(),
-            feed={"x": input1, "target_tensor": input2},
-            fetch_list=[out_1],
-        )
-        assert np.array_equal(res_1[0], np.tile(input1, (4, 1)))
 if __name__ == "__main__":
     unittest.main()
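The removed dygraph test above exercised the tiling semantics of the fluid op (a shape [3] input expanded to [6]); the broadcasting paddle.expand_as does not accept that case, so an equivalent check against current APIs would use paddle.tile. A sketch, under that assumption:

import numpy as np
import paddle

paddle.disable_static()
np_data_x = np.array([1, 2, 3]).astype('int32')
data_x = paddle.to_tensor(np_data_x)
# The old [3] -> [6] expansion is a tiling, not a broadcast.
out = paddle.tile(data_x, repeat_times=[2])  # shape [6]
assert np.array_equal(out.numpy(), np.tile(np_data_x, (2,)))
paddle.enable_static()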