PaddlePaddle / Paddle, commit db68e085 (unverified)
Authored Aug 29, 2020 by ruri; committed via GitHub on Aug 29, 2020
[API2.0]Unify pooling function and add adaptive max pooling function (#26483)
Parent: a1b99fae
Showing 12 changed files with 2,273 additions and 1,440 deletions (+2273 -1440)
python/paddle/fluid/layers/nn.py                                  +4    -0
python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py   +119  -0
python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py   +110  -0
python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py   +274  -0
python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py   +293  -0
python/paddle/fluid/tests/unittests/test_pool1d_api.py            +0    -64
python/paddle/nn/__init__.py                                      +12   -0
python/paddle/nn/functional/__init__.py                           +13   -7
python/paddle/nn/functional/conv.py                               +12   -12
python/paddle/nn/functional/pooling.py                            +719  -814
python/paddle/nn/layer/__init__.py                                +8    -6
python/paddle/nn/layer/pooling.py                                 +709  -537
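For orientation, here is how the unified API is exercised in the tests below: a functional form under paddle.nn.functional and a layer form under paddle.nn (a minimal sketch; the shapes are illustrative):

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()
    x = paddle.to_variable(np.random.random([2, 3, 32]).astype("float32"))

    # functional form: adaptively pool the length-32 axis down to 16 bins
    y = F.adaptive_max_pool1d(x, output_size=16)    # shape [2, 3, 16]

    # layer form, same result
    pool = paddle.nn.AdaptiveMaxPool1d(output_size=16)
    y2 = pool(x)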
python/paddle/fluid/layers/nn.py (view file @ db68e085)
...
@@ -1858,6 +1858,7 @@ def conv3d(input,
     return helper.append_activation(pre_act)

+@deprecated(since="2.0.0", update_to="paddle.nn.functional.pool2d")
 @templatedoc()
 def pool2d(input,
            pool_size=-1,
...
@@ -2075,6 +2076,7 @@ def pool2d(input,
     return pool_out

+@deprecated(since="2.0.0", update_to="paddle.nn.functional.pool3d")
 @templatedoc()
 def pool3d(input,
            pool_size=-1,
...
@@ -2303,6 +2305,7 @@ def pool3d(input,
     return pool_out

+@deprecated(since="2.0.0", update_to="paddle.nn.functional.adaptive_pool2d")
 @templatedoc(op_type="pool2d")
 def adaptive_pool2d(input,
                     pool_size,
...
@@ -2450,6 +2453,7 @@ def adaptive_pool2d(input,
     return (pool_out, mask) if require_index else pool_out

+@deprecated(since="2.0.0", update_to="paddle.nn.functional.adaptive_pool3d")
 @templatedoc(op_type="pool3d")
 def adaptive_pool3d(input,
                     pool_size,
...
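The decorators above keep the fluid-era entry points callable while steering users to the 2.0 replacements named in update_to. As a rough illustration of the mechanism only (an assumed stand-in, not Paddle's actual implementation):

    import functools
    import warnings

    def deprecated(since, update_to):
        # illustrative stand-in for the decorator applied above
        def wrap(fn):
            @functools.wraps(fn)
            def inner(*args, **kwargs):
                warnings.warn(
                    "{} is deprecated since {}; use {} instead.".format(
                        fn.__name__, since, update_to),
                    DeprecationWarning)
                return fn(*args, **kwargs)
            return inner
        return wrap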
python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py (new file, mode 100644, view file @ db68e085)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid import compiler, Program, program_guard
def adaptive_start_index(index, input_size, output_size):
    return int(np.floor(index * input_size / output_size))


def adaptive_end_index(index, input_size, output_size):
    return int(np.ceil((index + 1) * input_size / output_size))
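# Editor's note on the bin math these helpers implement: with input_size=32
# and output_size=16, bin i spans [floor(32*i/16), ceil(32*(i+1)/16))
# = [2*i, 2*i + 2), so each of the 16 outputs sees exactly two inputs.
# When the sizes do not divide evenly, adjacent bins may overlap.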
def avg_pool1D_forward_naive(x,
                             ksize,
                             strides,
                             paddings,
                             global_pool=0,
                             ceil_mode=False,
                             exclusive=False,
                             adaptive=False,
                             data_type=np.float64):
    N, C, L = x.shape
    if global_pool == 1:
        ksize = [L]
    if adaptive:
        L_out = ksize[0]
    else:
        L_out = (L - ksize[0] + 2 * paddings[0] + strides[0] - 1
                 ) // strides[0] + 1 if ceil_mode else (
                     L - ksize[0] + 2 * paddings[0]) // strides[0] + 1

    out = np.zeros((N, C, L_out))
    for i in range(L_out):
        if adaptive:
            r_start = adaptive_start_index(i, L, ksize[0])
            r_end = adaptive_end_index(i, L, ksize[0])
        else:
            r_start = np.max((i * strides[0] - paddings[0], 0))
            r_end = np.min((i * strides[0] + ksize[0] - paddings[0], L))
        x_masked = x[:, :, r_start:r_end]

        field_size = (r_end - r_start) if (exclusive or adaptive) else ksize[0]
        if data_type == np.int8 or data_type == np.uint8:
            # integer dtypes round the averaged window; the 1-D input
            # is summed over its single pooled axis
            out[:, :, i] = (np.rint(
                np.sum(x_masked, axis=2) / field_size)).astype(data_type)
        else:
            out[:, :, i] = (np.sum(x_masked, axis=2) /
                            field_size).astype(data_type)
    return out
class TestPool1d_API(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_adaptive_avg_dygraph_results(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = F.adaptive_avg_pool1d(input, output_size=16)
            result_np = avg_pool1D_forward_naive(
                input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True)
            self.assertTrue(np.allclose(result.numpy(), result_np))

            ada_max_pool1d_dg = paddle.nn.layer.AdaptiveAvgPool1d(
                output_size=16)
            result = ada_max_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_adaptive_avg_static_results(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
            result = F.adaptive_avg_pool1d(input, output_size=16)

            input_np = np.random.random([2, 3, 32]).astype("float32")
            result_np = avg_pool1D_forward_naive(
                input_np, ksize=[16], strides=[2], paddings=[0], adaptive=True)

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))

    def test_adaptive_avg_pool1d(self):
        for place in self.places:
            self.check_adaptive_avg_dygraph_results(place)
            self.check_adaptive_avg_static_results(place)


if __name__ == '__main__':
    unittest.main()
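Since 32 divides evenly into 16 bins here, the adaptive average reference above collapses to a plain reshape-and-mean. A quick self-contained sanity sketch (NumPy only; evenly divisible lengths assumed):

    import numpy as np

    x = np.random.random([2, 3, 32]).astype("float32")
    # each of the 16 bins averages 32 // 16 = 2 consecutive samples
    out = x.reshape(2, 3, 16, 2).mean(axis=3)    # shape (2, 3, 16)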
python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py (new file, mode 100644, view file @ db68e085)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid import compiler, Program, program_guard
def adaptive_start_index(index, input_size, output_size):
    return int(np.floor(index * input_size / output_size))


def adaptive_end_index(index, input_size, output_size):
    return int(np.ceil((index + 1) * input_size / output_size))
def max_pool1D_forward_naive(x,
                             ksize,
                             strides,
                             paddings,
                             global_pool=0,
                             ceil_mode=False,
                             exclusive=False,
                             adaptive=False,
                             data_type=np.float64):
    N, C, L = x.shape
    if global_pool == 1:
        ksize = [L]
    if adaptive:
        L_out = ksize[0]
    else:
        L_out = (L - ksize[0] + 2 * paddings[0] + strides[0] - 1
                 ) // strides[0] + 1 if ceil_mode else (
                     L - ksize[0] + 2 * paddings[0]) // strides[0] + 1

    out = np.zeros((N, C, L_out))
    for i in range(L_out):
        if adaptive:
            r_start = adaptive_start_index(i, L, ksize[0])
            r_end = adaptive_end_index(i, L, ksize[0])
        else:
            r_start = np.max((i * strides[0] - paddings[0], 0))
            r_end = np.min((i * strides[0] + ksize[0] - paddings[0], L))
        x_masked = x[:, :, r_start:r_end]

        out[:, :, i] = np.max(x_masked, axis=2)
    return out
class TestPool1d_API(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_adaptive_max_dygraph_results(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = F.adaptive_max_pool1d(input, output_size=16)
            result_np = max_pool1D_forward_naive(
                input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True)
            self.assertTrue(np.allclose(result.numpy(), result_np))

            ada_max_pool1d_dg = paddle.nn.layer.AdaptiveMaxPool1d(
                output_size=16)
            result = ada_max_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_adaptive_max_static_results(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
            result = F.adaptive_max_pool1d(input, output_size=16)

            input_np = np.random.random([2, 3, 32]).astype("float32")
            result_np = max_pool1D_forward_naive(
                input_np, ksize=[16], strides=[2], paddings=[0], adaptive=True)

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))

    def test_adaptive_max_pool1d(self):
        for place in self.places:
            self.check_adaptive_max_dygraph_results(place)
            self.check_adaptive_max_static_results(place)


if __name__ == '__main__':
    unittest.main()
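When the sizes do not divide evenly, the index helpers above produce overlapping windows rather than raising. A small worked example (NumPy only, mirroring adaptive_start_index/adaptive_end_index for input_size=7, output_size=3):

    import numpy as np

    # bins are [floor(7*i/3), ceil(7*(i+1)/3)) = [0, 3), [2, 5), [4, 7)
    x = np.arange(7.0)
    bins = [x[int(np.floor(i * 7 / 3)):int(np.ceil((i + 1) * 7 / 3))]
            for i in range(3)]
    print([b.max() for b in bins])    # [2.0, 4.0, 6.0]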
python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py (new file, mode 100644, view file @ db68e085)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import division

import unittest

import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
def adaptive_start_index(index, input_size, output_size):
    return int(np.floor(index * input_size / output_size))


def adaptive_end_index(index, input_size, output_size):
    return int(np.ceil((index + 1) * input_size / output_size))
def adaptive_pool2d_forward(x, output_size, data_format='NCHW',
                            pool_type="max"):
    N = x.shape[0]
    C, H, W = [x.shape[1], x.shape[2], x.shape[3]] if data_format == 'NCHW' \
        else [x.shape[3], x.shape[1], x.shape[2]]

    if isinstance(output_size, int) or output_size is None:
        H_out = output_size
        W_out = output_size
        output_size = [H_out, W_out]
    else:
        H_out, W_out = output_size

    if output_size[0] is None:
        output_size[0] = H
        H_out = H
    if output_size[1] is None:
        output_size[1] = W
        W_out = W

    out = np.zeros((N, C, H_out, W_out)) if data_format == 'NCHW' \
        else np.zeros((N, H_out, W_out, C))

    for i in range(H_out):
        in_h_start = adaptive_start_index(i, H, output_size[0])
        in_h_end = adaptive_end_index(i, H, output_size[0])

        for j in range(W_out):
            in_w_start = adaptive_start_index(j, W, output_size[1])
            in_w_end = adaptive_end_index(j, W, output_size[1])

            if data_format == 'NCHW':
                x_masked = x[:, :, in_h_start:in_h_end, in_w_start:in_w_end]
                if pool_type == 'avg':
                    field_size = ((in_h_end - in_h_start) *
                                  (in_w_end - in_w_start))
                    out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size
                elif pool_type == 'max':
                    out[:, :, i, j] = np.max(x_masked, axis=(2, 3))
            elif data_format == 'NHWC':
                x_masked = x[:, in_h_start:in_h_end, in_w_start:in_w_end, :]
                if pool_type == 'avg':
                    field_size = ((in_h_end - in_h_start) *
                                  (in_w_end - in_w_start))
                    out[:, i, j, :] = np.sum(x_masked, axis=(1, 2)) / field_size
                elif pool_type == 'max':
                    out[:, i, j, :] = np.max(x_masked, axis=(1, 2))
    return out
class TestAdaptiveMaxPool2dAPI(unittest.TestCase):
    def setUp(self):
        self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
        self.res_1_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[3, 3], pool_type="max")
        self.res_2_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=5, pool_type="max")
        self.res_3_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[2, 5], pool_type="max")
        """
        self.res_4_np = adaptive_pool2d_forward(
            x=self.x_np,
            output_size=[3, 3],
            pool_type="max",
            data_format="NHWC")
        """
        self.res_5_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[None, 3], pool_type="max")

    def test_static_graph(self):
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32")

            out_1 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[3, 3])
            out_2 = paddle.nn.functional.adaptive_max_pool2d(x=x, output_size=5)
            out_3 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[2, 5])
            # out_4 = paddle.nn.functional.adaptive_max_pool2d(
            #     x=x, output_size=[3, 3], data_format="NHWC")
            out_5 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[None, 3])

            exe = paddle.static.Executor(place=place)
            [res_1, res_2, res_3, res_5] = exe.run(
                fluid.default_main_program(),
                feed={"x": self.x_np},
                fetch_list=[out_1, out_2, out_3, out_5])

            assert np.allclose(res_1, self.res_1_np)
            assert np.allclose(res_2, self.res_2_np)
            assert np.allclose(res_3, self.res_3_np)
            # assert np.allclose(res_4, self.res_4_np)
            assert np.allclose(res_5, self.res_5_np)

    def test_dynamic_graph(self):
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_variable(self.x_np)

            out_1 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, return_indices=False, output_size=[3, 3])
            out_2 = paddle.nn.functional.adaptive_max_pool2d(x=x, output_size=5)
            out_3 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[2, 5])
            # out_4 = paddle.nn.functional.adaptive_max_pool2d(
            #     x=x, output_size=[3, 3], data_format="NHWC")
            out_5 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[None, 3])

            assert np.allclose(out_1.numpy(), self.res_1_np)
            assert np.allclose(out_2.numpy(), self.res_2_np)
            assert np.allclose(out_3.numpy(), self.res_3_np)
            # assert np.allclose(out_4.numpy(), self.res_4_np)
            assert np.allclose(out_5.numpy(), self.res_5_np)
class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase):
    def setUp(self):
        self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
        self.res_1_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[3, 3], pool_type="max")
        self.res_2_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=5, pool_type="max")
        self.res_3_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[2, 5], pool_type="max")
        # self.res_4_np = adaptive_pool2d_forward(
        #     x=self.x_np,
        #     output_size=[3, 3],
        #     pool_type="max",
        #     data_format="NHWC")
        self.res_5_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[None, 3], pool_type="max")

    def test_static_graph(self):
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32")

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3])
            out_1 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=5)
            out_2 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[2, 5])
            out_3 = adaptive_max_pool(x=x)

            # adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(
            #     output_size=[3, 3], data_format="NHWC")
            # out_4 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(
                output_size=[None, 3])
            out_5 = adaptive_max_pool(x=x)

            exe = paddle.static.Executor(place=place)
            [res_1, res_2, res_3, res_5] = exe.run(
                fluid.default_main_program(),
                feed={"x": self.x_np},
                fetch_list=[out_1, out_2, out_3, out_5])

            assert np.allclose(res_1, self.res_1_np)
            assert np.allclose(res_2, self.res_2_np)
            assert np.allclose(res_3, self.res_3_np)
            # assert np.allclose(res_4, self.res_4_np)
            assert np.allclose(res_5, self.res_5_np)

    def test_dynamic_graph(self):
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_variable(self.x_np)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3])
            out_1 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=5)
            out_2 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[2, 5])
            out_3 = adaptive_max_pool(x=x)

            # adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(
            #     output_size=[3, 3], data_format="NHWC")
            # out_4 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(
                output_size=[None, 3])
            out_5 = adaptive_max_pool(x=x)

            assert np.allclose(out_1.numpy(), self.res_1_np)
            assert np.allclose(out_2.numpy(), self.res_2_np)
            assert np.allclose(out_3.numpy(), self.res_3_np)
            # assert np.allclose(out_4.numpy(), self.res_4_np)
            assert np.allclose(out_5.numpy(), self.res_5_np)


if __name__ == '__main__':
    unittest.main()
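One behavior these tests pin down: a None entry in output_size leaves that spatial dimension untouched (see the "output_size[i] is None" branches in the reference function above). A minimal sketch:

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()
    x = paddle.to_variable(np.random.random([2, 3, 7, 7]).astype("float32"))
    # None keeps H at 7, so the result has shape [2, 3, 7, 3]
    y = F.adaptive_max_pool2d(x, output_size=[None, 3])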
python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py (new file, mode 100755, view file @ db68e085)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import division

import unittest

import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
def adaptive_start_index(index, input_size, output_size):
    return int(np.floor(index * input_size / output_size))


def adaptive_end_index(index, input_size, output_size):
    return int(np.ceil((index + 1) * input_size / output_size))
def adaptive_pool3d_forward(x,
                            output_size,
                            adaptive=True,
                            data_format='NCDHW',
                            pool_type='max'):
    N = x.shape[0]
    C, D, H, W = [x.shape[1], x.shape[2], x.shape[3], x.shape[4]] \
        if data_format == 'NCDHW' \
        else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]]

    if isinstance(output_size, int) or output_size is None:
        H_out = output_size
        W_out = output_size
        D_out = output_size
        output_size = [D_out, H_out, W_out]
    else:
        D_out, H_out, W_out = output_size

    if output_size[0] is None:
        output_size[0] = D
        D_out = D
    if output_size[1] is None:
        output_size[1] = H
        H_out = H
    if output_size[2] is None:
        output_size[2] = W
        W_out = W

    out = np.zeros((N, C, D_out, H_out, W_out)) if data_format == 'NCDHW' \
        else np.zeros((N, D_out, H_out, W_out, C))

    for k in range(D_out):
        d_start = adaptive_start_index(k, D, output_size[0])
        d_end = adaptive_end_index(k, D, output_size[0])

        for i in range(H_out):
            h_start = adaptive_start_index(i, H, output_size[1])
            h_end = adaptive_end_index(i, H, output_size[1])

            for j in range(W_out):
                w_start = adaptive_start_index(j, W, output_size[2])
                w_end = adaptive_end_index(j, W, output_size[2])

                if data_format == 'NCDHW':
                    x_masked = x[:, :, d_start:d_end, h_start:h_end,
                                 w_start:w_end]
                    if pool_type == 'avg':
                        field_size = (d_end - d_start) * (h_end - h_start) * (
                            w_end - w_start)
                        out[:, :, k, i, j] = np.sum(
                            x_masked, axis=(2, 3, 4)) / field_size
                    elif pool_type == 'max':
                        out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4))
                elif data_format == 'NDHWC':
                    x_masked = x[:, d_start:d_end, h_start:h_end,
                                 w_start:w_end, :]
                    if pool_type == 'avg':
                        field_size = (d_end - d_start) * (h_end - h_start) * (
                            w_end - w_start)
                        out[:, k, i, j, :] = np.sum(
                            x_masked, axis=(1, 2, 3)) / field_size
                    elif pool_type == 'max':
                        out[:, k, i, j, :] = np.max(x_masked, axis=(1, 2, 3))
    return out
class TestAdaptiveMaxPool3dAPI(unittest.TestCase):
    def setUp(self):
        self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
        self.res_1_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[3, 3, 3], pool_type="max")
        self.res_2_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=5, pool_type="max")
        self.res_3_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[2, 3, 5], pool_type="max")
        self.res_4_np = adaptive_pool3d_forward(
            x=self.x_np,
            output_size=[3, 3, 3],
            pool_type="max",
            data_format="NDHWC")
        self.res_5_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[None, 3, None], pool_type="max")

    def test_static_graph(self):
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")

            out_1 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[3, 3, 3])
            out_2 = paddle.nn.functional.adaptive_max_pool3d(x=x, output_size=5)
            out_3 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[2, 3, 5])
            # out_4 = paddle.nn.functional.adaptive_max_pool3d(
            #     x=x, output_size=[3, 3, 3], data_format="NDHWC")
            out_5 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[None, 3, None])

            exe = paddle.static.Executor(place=place)
            [res_1, res_2, res_3, res_5] = exe.run(
                fluid.default_main_program(),
                feed={"x": self.x_np},
                fetch_list=[out_1, out_2, out_3, out_5])

            assert np.allclose(res_1, self.res_1_np)
            assert np.allclose(res_2, self.res_2_np)
            assert np.allclose(res_3, self.res_3_np)
            # assert np.allclose(res_4, self.res_4_np)
            assert np.allclose(res_5, self.res_5_np)

    def test_dynamic_graph(self):
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_variable(self.x_np)

            out_1 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[3, 3, 3])
            out_2 = paddle.nn.functional.adaptive_max_pool3d(x=x, output_size=5)
            out_3 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[2, 3, 5])
            # out_4 = paddle.nn.functional.adaptive_max_pool3d(
            #     x=x, output_size=[3, 3, 3], data_format="NDHWC")
            out_5 = paddle.nn.functional.adaptive_max_pool3d(
                x=x, output_size=[None, 3, None])

            assert np.allclose(out_1.numpy(), self.res_1_np)
            assert np.allclose(out_2.numpy(), self.res_2_np)
            assert np.allclose(out_3.numpy(), self.res_3_np)
            # assert np.allclose(out_4.numpy(), self.res_4_np)
            assert np.allclose(out_5.numpy(), self.res_5_np)
class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase):
    def setUp(self):
        self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
        self.res_1_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[3, 3, 3], pool_type="max")
        self.res_2_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=5, pool_type="max")
        self.res_3_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[2, 3, 5], pool_type="max")
        # self.res_4_np = adaptive_pool3d_forward(
        #     x=self.x_np,
        #     output_size=[3, 3, 3],
        #     pool_type="max",
        #     data_format="NDHWC")
        self.res_5_np = adaptive_pool3d_forward(
            x=self.x_np, output_size=[None, 3, None], pool_type="max")

    def test_static_graph(self):
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
                output_size=[3, 3, 3])
            out_1 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(output_size=5)
            out_2 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
                output_size=[2, 3, 5])
            out_3 = adaptive_max_pool(x=x)

            # adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
            #     output_size=[3, 3, 3], data_format="NDHWC")
            # out_4 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
                output_size=[None, 3, None])
            out_5 = adaptive_max_pool(x=x)

            exe = paddle.static.Executor(place=place)
            [res_1, res_2, res_3, res_5] = exe.run(
                fluid.default_main_program(),
                feed={"x": self.x_np},
                fetch_list=[out_1, out_2, out_3, out_5])

            assert np.allclose(res_1, self.res_1_np)
            assert np.allclose(res_2, self.res_2_np)
            assert np.allclose(res_3, self.res_3_np)
            # assert np.allclose(res_4, self.res_4_np)
            assert np.allclose(res_5, self.res_5_np)

    def test_dynamic_graph(self):
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_variable(self.x_np)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
                output_size=[3, 3, 3])
            out_1 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(output_size=5)
            out_2 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
                output_size=[2, 3, 5])
            out_3 = adaptive_max_pool(x=x)

            # adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
            #     output_size=[3, 3, 3], data_format="NDHWC")
            # out_4 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
                output_size=[None, 3, None])
            out_5 = adaptive_max_pool(x=x)

            assert np.allclose(out_1.numpy(), self.res_1_np)
            assert np.allclose(out_2.numpy(), self.res_2_np)
            assert np.allclose(out_3.numpy(), self.res_3_np)
            # assert np.allclose(out_4.numpy(), self.res_4_np)
            assert np.allclose(out_5.numpy(), self.res_5_np)


if __name__ == '__main__':
    unittest.main()
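A typical use of the adaptive layers added here is to make a downstream head independent of input resolution; with output_size=1 they reduce to global max pooling. A hedged sketch (shapes illustrative):

    import numpy as np
    import paddle

    paddle.disable_static()
    pool = paddle.nn.AdaptiveMaxPool3d(output_size=1)
    for shape in ([2, 3, 5, 7, 7], [2, 3, 8, 9, 10]):
        x = paddle.to_variable(np.random.random(shape).astype("float32"))
        y = pool(x)    # always [2, 3, 1, 1, 1], whatever D/H/W were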
python/paddle/fluid/tests/unittests/test_pool1d_api.py (view file @ db68e085)
...
@@ -174,66 +174,6 @@ class TestPool1d_API(unittest.TestCase):
             result = max_pool1d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))

-    def check_adaptive_max_dygraph_results(self, place):
-        with fluid.dygraph.guard(place):
-            input_np = np.random.random([2, 3, 32]).astype("float32")
-            input = fluid.dygraph.to_variable(input_np)
-            result = F.adaptive_max_pool1d(input, output_size=16)
-            result_np = max_pool1D_forward_naive(
-                input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True)
-            self.assertTrue(np.allclose(result.numpy(), result_np))
-
-            ada_max_pool1d_dg = paddle.nn.layer.AdaptiveMaxPool1d(
-                output_size=16)
-            result = ada_max_pool1d_dg(input)
-            self.assertTrue(np.allclose(result.numpy(), result_np))
-
-    def check_adaptive_avg_dygraph_results(self, place):
-        with fluid.dygraph.guard(place):
-            input_np = np.random.random([2, 3, 32]).astype("float32")
-            input = fluid.dygraph.to_variable(input_np)
-            result = F.adaptive_avg_pool1d(input, output_size=16)
-            result_np = avg_pool1D_forward_naive(
-                input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True)
-            self.assertTrue(np.allclose(result.numpy(), result_np))
-
-            ada_max_pool1d_dg = paddle.nn.layer.AdaptiveAvgPool1d(
-                output_size=16)
-            result = ada_max_pool1d_dg(input)
-            self.assertTrue(np.allclose(result.numpy(), result_np))
-
-    def check_adaptive_max_static_results(self, place):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
-            result = F.adaptive_max_pool1d(input, output_size=16)
-
-            input_np = np.random.random([2, 3, 32]).astype("float32")
-            result_np = max_pool1D_forward_naive(
-                input_np, ksize=[16], strides=[2], paddings=[0], adaptive=True)
-
-            exe = fluid.Executor(place)
-            fetches = exe.run(fluid.default_main_program(),
-                              feed={"input": input_np},
-                              fetch_list=[result])
-            self.assertTrue(np.allclose(fetches[0], result_np))
-
-    def check_adaptive_avg_static_results(self, place):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
-            result = F.adaptive_avg_pool1d(input, output_size=16)
-
-            input_np = np.random.random([2, 3, 32]).astype("float32")
-            result_np = avg_pool1D_forward_naive(
-                input_np, ksize=[16], strides=[2], paddings=[0], adaptive=True)
-
-            exe = fluid.Executor(place)
-            fetches = exe.run(fluid.default_main_program(),
-                              feed={"input": input_np},
-                              fetch_list=[result])
-            self.assertTrue(np.allclose(fetches[0], result_np))
-
     def check_max_dygraph_padding_same(self, place):
         with fluid.dygraph.guard(place):
             input_np = np.random.random([2, 3, 32]).astype("float32")
...
@@ -265,10 +205,6 @@ class TestPool1d_API(unittest.TestCase):
             self.check_avg_dygraph_results(place)
             self.check_max_static_results(place)
             self.check_avg_static_results(place)
-            self.check_adaptive_max_dygraph_results(place)
-            self.check_adaptive_avg_dygraph_results(place)
-            self.check_adaptive_max_static_results(place)
-            self.check_adaptive_avg_static_results(place)
             self.check_max_dygraph_padding_same(place)
             self.check_avg_dygraph_padding_same(place)
...
python/paddle/nn/__init__.py (view file @ db68e085)
...
@@ -97,8 +97,20 @@ from .layer.common import Dropout  #DEFINE_ALIAS
 from .layer.common import Dropout2D  #DEFINE_ALIAS
 from .layer.common import Dropout3D  #DEFINE_ALIAS
 from .layer.common import AlphaDropout  #DEFINE_ALIAS
+from .layer.pooling import AvgPool1d  #DEFINE_ALIAS
+from .layer.pooling import AvgPool2d  #DEFINE_ALIAS
+from .layer.pooling import AvgPool3d  #DEFINE_ALIAS
+from .layer.pooling import MaxPool1d  #DEFINE_ALIAS
+from .layer.pooling import MaxPool2d  #DEFINE_ALIAS
+from .layer.pooling import MaxPool3d  #DEFINE_ALIAS
+from .layer.pooling import AdaptiveAvgPool1d  #DEFINE_ALIAS
+from .layer.pooling import AdaptiveAvgPool2d  #DEFINE_ALIAS
+from .layer.pooling import AdaptiveAvgPool3d  #DEFINE_ALIAS
+from .layer.pooling import AdaptiveMaxPool1d  #DEFINE_ALIAS
+from .layer.pooling import AdaptiveMaxPool2d  #DEFINE_ALIAS
+from .layer.pooling import AdaptiveMaxPool3d  #DEFINE_ALIAS
 from .layer.conv import Conv1d  #DEFINE_ALIAS
 from .layer.conv import Conv2d  #DEFINE_ALIAS
 from .layer.conv import Conv3d  #DEFINE_ALIAS
...
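After this hunk the pooling layers are importable directly from paddle.nn, e.g. (a minimal sketch):

    import paddle

    pool_avg = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3])
    pool_max = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3])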
python/paddle/nn/functional/__init__.py (view file @ db68e085)
...
@@ -170,22 +170,28 @@ from .norm import layer_norm  #DEFINE_ALIAS
from .norm import lrn  #DEFINE_ALIAS
from .norm import normalize  #DEFINE_ALIAS
# from .norm import spectral_norm  #DEFINE_ALIAS
(inline diff; the old and new pooling/rnn import blocks are shown in sequence)
from .pooling import max_pool1d  #DEFINE_ALIAS
from .pooling import avg_pool1d  #DEFINE_ALIAS
from .pooling import adaptive_max_pool1d  #DEFINE_ALIAS
from .pooling import adaptive_avg_pool1d  #DEFINE_ALIAS
from .pooling import pool2d  #DEFINE_ALIAS
from .pooling import pool3d  #DEFINE_ALIAS
from .pooling import avg_pool1d  #DEFINE_ALIAS
from .pooling import adaptive_pool2d  #DEFINE_ALIAS
from .pooling import adaptive_pool3d  #DEFINE_ALIAS
from .rnn import rnn  #DEFINE_ALIAS
from .rnn import birnn  #DEFINE_ALIAS
from .pooling import avg_pool2d  #DEFINE_ALIAS
from .pooling import max_pool2d  #DEFINE_ALIAS
from .pooling import avg_pool3d  #DEFINE_ALIAS
from .pooling import max_pool1d  #DEFINE_ALIAS
from .pooling import max_pool2d  #DEFINE_ALIAS
from .pooling import max_pool3d  #DEFINE_ALIAS
from .pooling import adaptive_pool2d  #DEFINE_ALIAS
from .pooling import adaptive_pool3d  #DEFINE_ALIAS
from .pooling import adaptive_max_pool1d  #DEFINE_ALIAS
from .pooling import adaptive_max_pool2d  #DEFINE_ALIAS
from .pooling import adaptive_max_pool3d  #DEFINE_ALIAS
from .pooling import adaptive_avg_pool1d  #DEFINE_ALIAS
from .pooling import adaptive_avg_pool2d  #DEFINE_ALIAS
from .pooling import adaptive_avg_pool3d  #DEFINE_ALIAS
from .rnn import rnn  #DEFINE_ALIAS
from .rnn import birnn  #DEFINE_ALIAS
# from .rnn import gru_unit  #DEFINE_ALIAS
# from .rnn import lstm  #DEFINE_ALIAS
# from .rnn import lstm_unit  #DEFINE_ALIAS
...
python/paddle/nn/functional/conv.py (view file @ db68e085)
...
@@ -158,7 +158,7 @@ def conv1d(x,
         bias (Tensor, optional): The bias with shape [M,]. Default: None.
         stride (int or tuple, optional): The stride size. If stride is a tuple, it must
             contain one integers, (stride_size). Default: 1.
-        padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms.
+        padding(int|str|tuple|list, optional): The padding size. Padding could be in one of the following forms.
         1. a string in ['valid', 'same'].
         2. an int, which means the feature map is zero paded by size of `padding` on both sides.
         3. a list[int] or tuple[int] whose length is 1, which means the feature map is zero paded by size of `padding[0]` on both sides.
...
@@ -185,7 +185,7 @@ def conv1d(x,
             same with input.
     Raises:
-        ValueError: If the channel dimmention of the input is less than or equal to zero.
+        ValueError: If the channel dimension of the input is less than or equal to zero.
         ValueError: If `data_format` is not "NCL" or "NLC".
         ValueError: If `padding` is a string, but not "SAME" or "VALID".
         ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
...
@@ -238,7 +238,7 @@ def conv1d(x,
     num_channels = x.shape[channel_dim]
     num_filters = weight.shape[0]
     if num_channels < 0:
-        raise ValueError("The channel dimmention of the input({}) "
+        raise ValueError("The channel dimension of the input({}) "
                          "should be defined. Received: {}.".format(
                              x.shape, num_channels))
     if num_channels % groups != 0:
...
@@ -260,7 +260,7 @@ def conv1d(x,
             padding = padding + [0]
     else:
         raise ValueError(
-            "The size of padding's dimmention should 1 or 2. But got padding={}".
+            "The size of padding's dimension should be 1 or 2. But got padding={}".
             format(padding))
     stride = utils.convert_to_list(stride, 1, 'stride') + [1]
...
@@ -424,7 +424,7 @@ def conv2d(x,
     Raises:
         ValueError: If `data_format` is not "NCHW" or "NHWC".
-        ValueError: If the channel dimmention of the input is less than or equal to zero.
+        ValueError: If the channel dimension of the input is less than or equal to zero.
         ValueError: If `padding` is a string, but not "SAME" or "VALID".
         ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
             or the element corresponding to the input's channel is not 0.
...
@@ -465,7 +465,7 @@ def conv2d(x,
     num_channels = x.shape[channel_dim]
     num_filters = weight.shape[0]
     if num_channels < 0:
-        raise ValueError("The channel dimmention of the input({}) "
+        raise ValueError("The channel dimension of the input({}) "
                          "should be defined. Received: {}.".format(
                              x.shape, num_channels))
     if num_channels % groups != 0:
...
@@ -710,7 +710,7 @@ def conv_transpose1d(x,
     num_channels = x.shape[channel_dim]
     if num_channels < 0:
-        raise ValueError("The channel dimmention of the input({}) "
+        raise ValueError("The channel dimension of the input({}) "
                          "should be defined. Received: {}.".format(
                              x.shape, num_channels))
     if num_channels % groups != 0:
...
@@ -728,7 +728,7 @@ def conv_transpose1d(x,
             padding = padding + [0]
     else:
         raise ValueError(
-            "The size of padding's dimmention should 1 or 2. But got padding={}".
+            "The size of padding's dimension should 1 or 2. But got padding={}".
             format(padding))
     stride = utils.convert_to_list(stride, 1, 'stride') + [1]
...
@@ -965,7 +965,7 @@ def conv_transpose2d(x,
     channel_dim = -1 if channel_last else 1
     num_channels = x.shape[channel_dim]
     if num_channels < 0:
-        raise ValueError("The channel dimmention of the input({}) "
+        raise ValueError("The channel dimension of the input({}) "
                          "should be defined. Received: {}.".format(
                              x.shape, num_channels))
     if num_channels % groups != 0:
...
@@ -1146,7 +1146,7 @@ def conv3d(x,
     Raises:
         ValueError: If `data_format` is not "NCDHW" or "NDHWC".
-        ValueError: If the channel dimmention of the input is less than or equal to zero.
+        ValueError: If the channel dimension of the input is less than or equal to zero.
         ValueError: If `padding` is a string, but not "SAME" or "VALID".
         ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
             or the element corresponding to the input's channel is not 0.
...
@@ -1187,7 +1187,7 @@ def conv3d(x,
     num_filters = weight.shape[0]
     if num_channels < 0:
         raise ValueError(
-            "The channel dimmention of the input({}) should be defined. "
+            "The channel dimension of the input({}) should be defined. "
             "Received: {}.".format(x.shape, num_channels))
     if num_channels % groups != 0:
         raise ValueError(
...
@@ -1422,7 +1422,7 @@ def conv_transpose3d(x,
     num_filters = weight.shape[1]
     if num_channels < 0:
         raise ValueError(
-            "The channel dimmention of the input({}) should be defined. "
+            "The channel dimension of the input({}) should be defined. "
            "Received: {}.".format(x.shape, num_channels))
     if num_channels % groups != 0:
         raise ValueError(
...
python/paddle/nn/functional/pooling.py (view file @ db68e085)
This diff is collapsed; click to expand.
python/paddle/nn/layer/__init__.py (view file @ db68e085)
...
@@ -66,16 +66,18 @@ from .common import Dropout  #DEFINE_ALIAS
from .common import Dropout2D  #DEFINE_ALIAS
from .common import Dropout3D  #DEFINE_ALIAS
from .common import AlphaDropout  #DEFINE_ALIAS
(inline diff; the old and new pooling import blocks are shown in sequence)
from .pooling import AdaptiveAvgPool2d  #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool3d  #DEFINE_ALIAS
from .pooling import AvgPool1d  #DEFINE_ALIAS
from .pooling import MaxPool1d  #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool1d  #DEFINE_ALIAS
from .pooling import AdaptiveMaxPool1d  #DEFINE_ALIAS
from .pooling import AvgPool2d  #DEFINE_ALIAS
from .pooling import MaxPool2d  #DEFINE_ALIAS
from .pooling import AvgPool3d  #DEFINE_ALIAS
from .pooling import MaxPool1d  #DEFINE_ALIAS
from .pooling import MaxPool2d  #DEFINE_ALIAS
from .pooling import MaxPool3d  #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool1d  #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool2d  #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool3d  #DEFINE_ALIAS
from .pooling import AdaptiveMaxPool1d  #DEFINE_ALIAS
from .pooling import AdaptiveMaxPool2d  #DEFINE_ALIAS
from .pooling import AdaptiveMaxPool3d  #DEFINE_ALIAS
from .conv import Conv1d  #DEFINE_ALIAS
from .conv import Conv2d  #DEFINE_ALIAS
from .conv import Conv3d  #DEFINE_ALIAS
...
python/paddle/nn/layer/pooling.py (view file @ db68e085)
This diff is collapsed; click to expand.