BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 69eeaf03 (unverified)
Authored on Nov 21, 2022 by 傅剑寒; committed via GitHub on Nov 21, 2022.
[fluid clean] remove fluid.layers.expand_as in nn.py under fluid (#47931)
Parent: d92daae2
Showing 5 changed files with 3 additions and 142 deletions (+3 -142)
python/paddle/fluid/layers/nn.py                                            +0  -89
python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py              +1  -3
python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py    +1  -3
python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py   +1  -3
python/paddle/fluid/tests/unittests/test_expand_as_op.py                    +0  -44
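In each of the three auto-parallel GPT test files changed below, the call paddle.fluid.layers.expand_as(position_ids, input_ids) is replaced by its non-fluid counterpart, paddle.expand_as(position_ids, input_ids). A minimal sketch of the resulting pattern, with illustrative batch and sequence sizes that are not taken from the tests:

    import paddle

    # position_ids carries a broadcastable leading dimension of 1 and is
    # expanded to the shape of input_ids, mirroring the updated test code.
    input_ids = paddle.randint(0, 100, shape=[4, 16], dtype='int64')  # [batch, seq_len]
    position_ids = paddle.arange(16, dtype='int64').unsqueeze(0)      # [1, seq_len]
    position_ids = paddle.expand_as(position_ids, input_ids)          # [4, 16]
    print(position_ids.shape)  # [4, 16]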
python/paddle/fluid/layers/nn.py

@@ -135,7 +135,6 @@ __all__ = [
     'unique',
     'unique_with_counts',
     'expand',
-    'expand_as',
     'scale',
     'elementwise_add',
     'elementwise_div',

@@ -9969,94 +9968,6 @@ def expand(x, expand_times, name=None):
     return out
 
 
-@deprecated(since='2.0.0', update_to="paddle.expand_as")
-def expand_as(x, target_tensor, name=None):
-    """
-    :alias_main: paddle.expand_as
-    :alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as
-    :old_api: paddle.fluid.layers.expand_as
-
-    expand_as operator tiles to the input by given expand tensor. You should set expand tensor
-    for each dimension by providing tensor 'target_tensor'. The rank of X
-    should be in [1, 6]. Please note that size of 'target_tensor' must be the same
-    with X's rank. Following is a using case:
-
-    .. code-block:: text
-
-        Input(X) is a 3-D tensor with shape [2, 3, 1]:
-                [
-                   [[1], [2], [3]],
-                   [[4], [5], [6]]
-                ]
-        target_tensor's shape: [2, 6, 2]
-
-        Output(Out) is a 3-D tensor with shape [2, 6, 2]:
-                [
-                    [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
-                    [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
-                ]
-
-    Args:
-        x (Variable): A Tensor with dtype float64, float32, int32.
-            A tensor with rank in [1, 6].
-        target_tensor (Variable): A Tensor with dtype float64, float32, int32.
-            target_tensor for expanding to Input(X). Only use target_tensor'shape.
-
-    Returns:
-        Variable: A Tensor with dtype float64, float32, int32.
-        After expanding, size of each dimension of Output(Out) is equal to the size
-        of the corresponding dimension of target_tensor multiplying the corresponding
-        value given by target_tensor.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-            import numpy as np
-            paddle.enable_static()
-
-            data = fluid.layers.data(name="data", shape=[-1,10], dtype='float64')
-            target_tensor = fluid.layers.data(
-                name="target_tensor", shape=[-1,20], dtype='float64')
-            result = fluid.layers.expand_as(x=data, target_tensor=target_tensor)
-            use_cuda = False
-            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
-            x = np.random.rand(3,10)
-            y = np.random.rand(3,20)
-            output = exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name])
-            print(output[0].shape)
-            #(3,20)
-
-    """
-    if _non_static_mode():
-        return _legacy_C_ops.expand_as(x, target_tensor)
-
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as'
-    )
-    check_variable_and_dtype(
-        target_tensor,
-        'target_tensor',
-        ['float32', 'float64', 'int32', 'int64', 'bool'],
-        'expand_as',
-    )
-    helper = LayerHelper('expand_as', input=x, **locals())
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    inputs = {'X': x, 'target_tensor': target_tensor}
-    helper.append_op(type='expand_as', inputs=inputs, outputs={'Out': out})
-    return out
 
 
 from paddle.fluid.framework import convert_np_dtype_to_dtype_
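The docstring removed above describes tile-style expansion: its text example turns a [2, 3, 1] input into a [2, 6, 2] output. For orientation only, a minimal sketch that reproduces that specific example with paddle.tile now that fluid.layers.expand_as is gone; the repeat factors are read off the docstring's shapes and are not part of this commit:

    import numpy as np
    import paddle

    # Tile the [2, 3, 1] input from the removed docstring to [2, 6, 2].
    x = paddle.to_tensor(np.array([[[1], [2], [3]], [[4], [5], [6]]]))
    out = paddle.tile(x, repeat_times=[1, 2, 2])
    print(out.shape)                # [2, 6, 2]
    print(out.numpy()[0].tolist())  # [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]]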
python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py

@@ -724,9 +724,7 @@ class GPTModel(nn.Layer):
                 dtype='int64',
             )
             position_ids = position_ids.unsqueeze(0)
-            position_ids = paddle.fluid.layers.expand_as(
-                position_ids, input_ids
-            )
+            position_ids = paddle.expand_as(position_ids, input_ids)
         embedding_output = self.embeddings(
             input_ids=input_ids, position_ids=position_ids
         )
python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py

@@ -616,9 +616,7 @@ class GPTModel(nn.Layer):
             )
             position_ids = position_ids.unsqueeze(0)
             # .expand_as(input_ids)
-            position_ids = paddle.fluid.layers.expand_as(
-                position_ids, input_ids
-            )
+            position_ids = paddle.expand_as(position_ids, input_ids)
         embedding_output = self.embeddings(
             input_ids=input_ids, position_ids=position_ids
         )
python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py

@@ -662,9 +662,7 @@ class GPTModel(nn.Layer):
             )
             position_ids = position_ids.unsqueeze(0)
             # .expand_as(input_ids)
-            position_ids = paddle.fluid.layers.expand_as(
-                position_ids, input_ids
-            )
+            position_ids = paddle.expand_as(position_ids, input_ids)
         embedding_output = self.embeddings(
             input_ids=input_ids, position_ids=position_ids
         )
python/paddle/fluid/tests/unittests/test_expand_as_op.py

@@ -15,7 +15,6 @@
 import unittest
 import numpy as np
 from op_test import OpTest
-import paddle.fluid as fluid
 
 
 def bcast(x, target_tensor):

@@ -100,48 +99,5 @@ class TestExpandAsOpRank4(OpTest):
         self.check_grad(['X'], 'Out')
 
 
-# Test dygraph API
-class TestExpandAsDygraphAPI(unittest.TestCase):
-    def test_api(self):
-        import paddle
-
-        paddle.disable_static()
-        np_data_x = np.array([1, 2, 3]).astype('int32')
-        np_data_y = np.array([1, 2, 3, 1, 2, 3]).astype('int32')
-        data_x = paddle.to_tensor(np_data_x)
-        data_y = paddle.to_tensor(np_data_y)
-        out = fluid.layers.expand_as(data_x, data_y)
-        np_out = out.numpy()
-        assert np.array_equal(np_out, np.tile(np_data_x, (2)))
-        paddle.enable_static()
-
-
-# Test python API
-class TestExpandAsAPI(unittest.TestCase):
-    def test_api(self):
-        input1 = np.random.random([12, 14]).astype("float32")
-        input2 = np.random.random([48, 14]).astype("float32")
-        x = fluid.layers.data(
-            name='x', shape=[12, 14], append_batch_size=False, dtype="float32"
-        )
-        y = fluid.layers.data(
-            name='target_tensor',
-            shape=[48, 14],
-            append_batch_size=False,
-            dtype="float32",
-        )
-        out_1 = fluid.layers.expand_as(x, target_tensor=y)
-        exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1 = exe.run(
-            fluid.default_main_program(),
-            feed={"x": input1, "target_tensor": input2},
-            fetch_list=[out_1],
-        )
-        assert np.array_equal(res_1[0], np.tile(input1, (4, 1)))
-
-
 if __name__ == "__main__":
     unittest.main()
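The deleted TestExpandAsDygraphAPI and TestExpandAsAPI covered the old fluid.layers.expand_as, which tiled its input (the deleted assertions compare against np.tile). As a hedged sketch only: a dygraph check against paddle.expand_as could look like the following, but paddle.expand_as expands size-1 (broadcastable) dimensions rather than tiling, so the shapes here are chosen to fit that semantics and this is not a drop-in replacement for the removed tests:

    import numpy as np
    import paddle

    # Illustrative shapes only: a [1, 3] tensor expanded to a [4, 3] target.
    np_x = np.array([[1, 2, 3]]).astype('int32')
    np_y = np.zeros([4, 3]).astype('int32')
    out = paddle.expand_as(paddle.to_tensor(np_x), paddle.to_tensor(np_y))
    assert np.array_equal(out.numpy(), np.broadcast_to(np_x, (4, 3)))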