BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle), commit cac5f5a7
Commit cac5f5a7 (unverified)
Authored Jan 06, 2023 by Weilong Wu; committed by GitHub on Jan 06, 2023

[Eager] polish adaptive series api (#49574)

Parent: 0019ef0c
Showing 1 changed file with 32 additions and 40 deletions.

python/paddle/nn/functional/pooling.py (+32, -40)
@@ -1456,11 +1456,6 @@ def adaptive_avg_pool1d(x, output_size, name=None):
              # pool_out shape: [1, 3, 16])
     """
     pool_type = 'avg'
-    if not in_dynamic_mode():
-        check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'adaptive_pool2d'
-        )
-        check_type(output_size, 'pool_size', (int), 'adaptive_pool1d')
     _check_input(x, 3)
     pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')
@@ -1483,7 +1478,10 @@ def adaptive_avg_pool1d(x, output_size, name=None):
         return squeeze(pool_out, [2])
     else:
         l_type = "pool2d"
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'adaptive_pool2d'
+        )
+        check_type(output_size, 'pool_size', (int), 'adaptive_pool1d')
         helper = LayerHelper(l_type, **locals())
         dtype = helper.input_dtype(input_param_name='x')
         pool_out = helper.create_variable_for_type_inference(dtype)
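As in every function touched by this commit, the checks that previously sat behind `if not in_dynamic_mode():` at the top of adaptive_avg_pool1d now run only in the `else:` (static graph) branch, so an eager-mode call never reaches them. A minimal eager-mode usage sketch, not part of the commit; the input shape is assumed, and the [1, 3, 16] output shape follows from the docstring fragment above:

    import paddle
    import paddle.nn.functional as F

    data = paddle.rand([1, 3, 32])                     # assumed NCL input
    pool_out = F.adaptive_avg_pool1d(data, output_size=16)
    print(pool_out.shape)                              # [1, 3, 16]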
@@ -1562,12 +1560,6 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
             # out.shape is [2, 3, 3, 3]
     """
-    if not in_dynamic_mode():
-        check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'adaptive_avg_pool2d'
-        )
-        check_type(data_format, 'data_format', str, 'adaptive_avg_pool2d')
     if data_format not in ["NCHW", "NHWC"]:
         raise ValueError(
             "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
@@ -1615,7 +1607,10 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
     else:
         l_type = 'pool2d'
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'adaptive_avg_pool2d'
+        )
+        check_type(data_format, 'data_format', str, 'adaptive_avg_pool2d')
         helper = LayerHelper(l_type, **locals())
         dtype = helper.input_dtype(input_param_name='x')
         pool_out = helper.create_variable_for_type_inference(dtype)
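The same move for adaptive_avg_pool2d: the dtype and data_format checks now live only in the static-graph branch. A hedged usage sketch, not part of the commit; the input shape is assumed, data_format='NCHW' is the documented default, and the output shape matches the "# out.shape is [2, 3, 3, 3]" fragment above:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 3, 32, 32])                    # assumed NCHW input
    out = F.adaptive_avg_pool2d(x, output_size=[3, 3], data_format='NCHW')
    print(out.shape)                                   # [2, 3, 3, 3]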
@@ -1700,12 +1695,6 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
             # out.shape is [2, 3, 3, 3, 3]
     """
-    if not in_dynamic_mode():
-        check_variable_and_dtype(
-            x, 'x', ['float32', 'float64'], 'adaptive_avg_pool3d'
-        )
-        check_type(data_format, 'data_format', str, 'adaptive_avg_pool3d')
     if data_format not in ["NCDHW", "NDHWC"]:
         raise ValueError(
             "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
@@ -1746,6 +1735,11 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
     else:
         l_type = 'pool3d'
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'adaptive_avg_pool2d'
+        )
+        check_type(data_format, 'data_format', str, 'adaptive_avg_pool2d')
         helper = LayerHelper(l_type, **locals())
         dtype = helper.input_dtype(input_param_name='x')
         pool_out = helper.create_variable_for_type_inference(dtype)
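adaptive_avg_pool3d gets the identical treatment (the re-added checks keep the 'adaptive_avg_pool2d' op name, exactly as shown in the hunk above). A hedged usage sketch with an assumed NCDHW input; the output shape matches the "# out.shape is [2, 3, 3, 3, 3]" fragment:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 3, 8, 32, 32])                 # assumed NCDHW input
    out = F.adaptive_avg_pool3d(x, output_size=[3, 3, 3])
    print(out.shape)                                   # [2, 3, 3, 3, 3]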
@@ -1810,13 +1804,6 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
              pool_out, indices = F.adaptive_max_pool1d(data, output_size=16, return_mask=True)
              # pool_out shape: [1, 3, 16] indices shape: [1, 3, 16]
     """
-    pool_type = 'max'
-    if not in_dynamic_mode():
-        check_variable_and_dtype(
-            x, 'x', ['float32', 'float64'], 'adaptive_max_pool1d'
-        )
-        check_type(output_size, 'pool_size', int, 'adaptive_max_pool1d')
-        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool1d')
     _check_input(x, 3)
     pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')
@@ -1834,6 +1821,12 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
     else:
         l_type = 'max_pool2d_with_index'
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64'], 'adaptive_max_pool1d'
+        )
+        check_type(output_size, 'pool_size', int, 'adaptive_max_pool1d')
+        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool1d')
         helper = LayerHelper(l_type, **locals())
         dtype = helper.input_dtype(input_param_name='x')
         pool_out = helper.create_variable_for_type_inference(dtype)
@@ -1846,7 +1839,7 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
         inputs={"X": x},
         outputs=outputs,
         attrs={
-            "pooling_type": pool_type,
+            "pooling_type": 'max',
             "ksize": pool_size,
             "adaptive": True,
         },
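For adaptive_max_pool1d, the removed pool_type = 'max' variable is replaced by the literal 'max' in the append_op attrs above. A usage sketch taken almost verbatim from the docstring line shown in the diff; only the input construction is assumed:

    import paddle
    import paddle.nn.functional as F

    data = paddle.rand([1, 3, 32])                     # assumed NCL input
    pool_out, indices = F.adaptive_max_pool1d(data, output_size=16, return_mask=True)
    print(pool_out.shape, indices.shape)               # [1, 3, 16] [1, 3, 16]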
@@ -1899,12 +1892,6 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
                                         output_size=[3, 3])
             # out.shape is [2, 3, 3, 3]
     """
-    if not in_dynamic_mode():
-        check_variable_and_dtype(
-            x, 'x', ['float32', 'float64'], 'adaptive_max_pool2d'
-        )
-        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool2d')
-        # check_type(output_size, 'pool_size', (int), 'adaptive_max_pool2d')
     _check_input(x, 4)
     in_h, in_w = x.shape[2:4]
@@ -1924,6 +1911,12 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
     else:
         l_type = 'max_pool2d_with_index'
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64'], 'adaptive_max_pool2d'
+        )
+        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool2d')
+        # check_type(output_size, 'pool_size', (int), 'adaptive_max_pool2d')
         helper = LayerHelper(l_type, **locals())
         dtype = helper.input_dtype(input_param_name='x')
         pool_out = helper.create_variable_for_type_inference(dtype)
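adaptive_max_pool2d follows the same pattern, including carrying the commented-out output_size check into the static branch. A hedged usage sketch, not part of the commit; the input shape is assumed, while output_size and the result shape come from the docstring fragment above:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 3, 32, 32])                    # assumed NCHW input
    out = F.adaptive_max_pool2d(x, output_size=[3, 3])
    print(out.shape)                                   # [2, 3, 3, 3]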
@@ -1988,13 +1981,6 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
                                           output_size=[3, 3, 3])
             # out.shape is [2, 3, 3, 3, 3]
     """
-    if not in_dynamic_mode():
-        check_variable_and_dtype(
-            x, 'x', ['float32', 'float64'], 'adaptive_max_pool3d'
-        )
-        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool3d')
-        # check_type(output_size, 'pool_size', (int), 'adaptive_max_pool3d')
     _check_input(x, 5)
     in_l, in_h, in_w = x.shape[2:5]
@@ -2018,6 +2004,12 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
     else:
         l_type = 'max_pool3d_with_index'
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64'], 'adaptive_max_pool3d'
+        )
+        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool3d')
+        # check_type(output_size, 'pool_size', (int), 'adaptive_max_pool3d')
         helper = LayerHelper(l_type, **locals())
         dtype = helper.input_dtype(input_param_name='x')
         pool_out = helper.create_variable_for_type_inference(dtype)
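Finally, adaptive_max_pool3d: the same relocation of checks into the 'max_pool3d_with_index' static-graph branch. A hedged usage sketch, not part of the commit; the input shape is assumed, while output_size and the result shape follow the docstring fragment above:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 3, 8, 32, 32])                 # assumed NCDHW input
    out = F.adaptive_max_pool3d(x, output_size=[3, 3, 3])
    print(out.shape)                                   # [2, 3, 3, 3, 3]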