未验证 提交 c8e18360 编写于 作者: D Double_V 提交者: GitHub

[API 2.0] add pool2d3d API,test=develop (#26331)

* add pool2d3d API,test=develop

* add api unittest,test=develop

* fix unittest, test=develop

* fix reviews, test=develop

* return one element when return indices is true, test=develop

* fix low converage; to_variable to to_tensor, test=develop

* sort API params, test=develop

* fix en doc, merge PR#26108 to here, test=develop

* fix en doc, test=develop
上级 78ca8cf0
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
import paddle.nn.functional as F
import paddle.fluid as fluid
def adaptive_start_index(index, input_size, output_size):
    """Return the inclusive start offset of the ``index``-th adaptive
    pooling window when ``input_size`` elements are split into
    ``output_size`` windows."""
    ratio = index * input_size / output_size
    return int(np.floor(ratio))
def adaptive_end_index(index, input_size, output_size):
    """Return the exclusive end offset of the ``index``-th adaptive
    pooling window when ``input_size`` elements are split into
    ``output_size`` windows."""
    ratio = (index + 1) * input_size / output_size
    return int(np.ceil(ratio))
def max_pool1D_forward_naive(x,
                             ksize,
                             strides,
                             paddings,
                             global_pool=0,
                             ceil_mode=False,
                             exclusive=False,
                             adaptive=False,
                             data_type=np.float64):
    """Naive max-pool reference over the last axis of an (N, C, L) array.

    ``exclusive`` and ``data_type`` are accepted for signature parity with
    the avg variant but do not influence max pooling. When ``adaptive`` is
    True, ``ksize[0]`` is the output length and strides/paddings are
    ignored; when ``global_pool`` is 1, the whole length is pooled.
    """
    batch, channels, length = x.shape
    if global_pool == 1:
        ksize = [length]
    if adaptive:
        out_len = ksize[0]
    else:
        span = length - ksize[0] + 2 * paddings[0]
        if ceil_mode:
            out_len = (span + strides[0] - 1) // strides[0] + 1
        else:
            out_len = span // strides[0] + 1
    out = np.zeros((batch, channels, out_len))
    for idx in range(out_len):
        if adaptive:
            start = adaptive_start_index(idx, length, ksize[0])
            end = adaptive_end_index(idx, length, ksize[0])
        else:
            # Clamp the window to the valid input range.
            start = max(idx * strides[0] - paddings[0], 0)
            end = min(idx * strides[0] + ksize[0] - paddings[0], length)
        out[:, :, idx] = x[:, :, start:end].max(axis=2)
    return out
def avg_pool1D_forward_naive(x,
                             ksize,
                             strides,
                             paddings,
                             global_pool=0,
                             ceil_mode=False,
                             exclusive=False,
                             adaptive=False,
                             data_type=np.float64):
    """Naive average-pool reference over the last axis of an (N, C, L) array.

    Args:
        x: input array laid out as (N, C, L); pooling runs along L.
        ksize, strides, paddings: one-element lists for the L dimension.
        global_pool: when 1, pool over the entire length (ksize becomes [L]).
        ceil_mode: size the output with ceiling instead of floor division.
        exclusive: divide by the clipped window size instead of ksize[0].
        adaptive: adaptive pooling; ksize[0] is then the output length and
            strides/paddings are ignored.
        data_type: result dtype; int8/uint8 results are rounded to the
            nearest integer before casting.

    Returns:
        np.ndarray of shape (N, C, L_out) with the pooled values.
    """
    N, C, L = x.shape
    if global_pool == 1:
        ksize = [L]
    if adaptive:
        L_out = ksize[0]
    else:
        L_out = (L - ksize[0] + 2 * paddings[0] + strides[0] - 1
                 ) // strides[0] + 1 if ceil_mode else (
                     L - ksize[0] + 2 * paddings[0]) // strides[0] + 1
    out = np.zeros((N, C, L_out))
    for i in range(L_out):
        if adaptive:
            r_start = adaptive_start_index(i, L, ksize[0])
            r_end = adaptive_end_index(i, L, ksize[0])
        else:
            r_start = np.max((i * strides[0] - paddings[0], 0))
            r_end = np.min((i * strides[0] + ksize[0] - paddings[0], L))
        x_masked = x[:, :, r_start:r_end]
        field_size = (r_end - r_start) \
            if (exclusive or adaptive) else (ksize[0])
        if data_type == np.int8 or data_type == np.uint8:
            # BUGFIX: x_masked is 3-D, so the reduction must be over axis 2
            # only; the previous axis=(2, 3) raised an AxisError whenever
            # the int8/uint8 branch was taken.
            out[:, :, i] = (np.rint(
                np.sum(x_masked, axis=2) / field_size)).astype(data_type)
        else:
            out[:, :, i] = (np.sum(x_masked, axis=(2)) /
                            field_size).astype(data_type)
    return out
class TestPool1d_API(unittest.TestCase):
    """API tests for 1D pooling (max/avg, adaptive variants).

    Each ``check_*`` helper compares a paddle pooling result against the
    numpy reference implementations defined above in this file, in both
    static-graph and dygraph modes, on every available place (CPU and,
    when compiled with CUDA, GPU).
    """

    def setUp(self):
        # Fixed seed so the random inputs are reproducible across runs.
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_avg_static_results(self, place):
        # Static graph: F.avg_pool1d must match the numpy avg reference.
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
            result = F.avg_pool1d(input, kernel_size=2, stride=2, padding=0)
            input_np = np.random.random([2, 3, 32]).astype("float32")
            result_np = avg_pool1D_forward_naive(
                input_np, ksize=[2], strides=[2], paddings=[0], ceil_mode=False)
            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))

    def check_avg_dygraph_results(self, place):
        # Dygraph: both the functional API and the AvgPool1d layer must
        # agree with the numpy reference.
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = F.avg_pool1d(input, kernel_size=2, stride=2, padding=[0])
            result_np = avg_pool1D_forward_naive(
                input_np, ksize=[2], strides=[2], paddings=[0])
            self.assertTrue(np.allclose(result.numpy(), result_np))
            avg_pool1d_dg = paddle.nn.layer.AvgPool1d(
                kernel_size=2, stride=None, padding=0)
            result = avg_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_max_static_results(self, place):
        # Static graph: F.max_pool1d must match the numpy max reference.
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
            result = F.max_pool1d(input, kernel_size=2, stride=2, padding=[0])
            input_np = np.random.random([2, 3, 32]).astype("float32")
            result_np = max_pool1D_forward_naive(
                input_np, ksize=[2], strides=[2], paddings=[0])
            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))

    def check_max_dygraph_results(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = F.max_pool1d(input, kernel_size=2, stride=2, padding=0)
            result_np = max_pool1D_forward_naive(
                input_np, ksize=[2], strides=[2], paddings=[0])
            self.assertTrue(np.allclose(result.numpy(), result_np))
            max_pool1d_dg = paddle.nn.layer.MaxPool1d(
                kernel_size=2, stride=None, padding=0)
            result = max_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_adaptive_max_dygraph_results(self, place):
        # Adaptive pooling: the reference runs with adaptive=True, in which
        # case strides/paddings are ignored and ksize is the output length.
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = F.adaptive_max_pool1d(input, output_size=16)
            result_np = max_pool1D_forward_naive(
                input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True)
            self.assertTrue(np.allclose(result.numpy(), result_np))
            ada_max_pool1d_dg = paddle.nn.layer.AdaptiveMaxPool1d(
                output_size=16)
            result = ada_max_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_adaptive_avg_dygraph_results(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = F.adaptive_avg_pool1d(input, output_size=16)
            result_np = avg_pool1D_forward_naive(
                input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True)
            self.assertTrue(np.allclose(result.numpy(), result_np))
            ada_max_pool1d_dg = paddle.nn.layer.AdaptiveAvgPool1d(
                output_size=16)
            result = ada_max_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_adaptive_max_static_results(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
            result = F.adaptive_max_pool1d(input, output_size=16)
            input_np = np.random.random([2, 3, 32]).astype("float32")
            result_np = max_pool1D_forward_naive(
                input_np, ksize=[16], strides=[2], paddings=[0], adaptive=True)
            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))

    def check_adaptive_avg_static_results(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
            result = F.adaptive_avg_pool1d(input, output_size=16)
            input_np = np.random.random([2, 3, 32]).astype("float32")
            result_np = avg_pool1D_forward_naive(
                input_np, ksize=[16], strides=[2], paddings=[0], adaptive=True)
            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))

    def check_max_dygraph_padding_same(self, place):
        # padding="SAME" with kernel 2 / stride 2 on length 32 requires no
        # actual padding, so the zero-padding reference still matches.
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = F.max_pool1d(
                input, kernel_size=2, stride=2, padding="SAME")
            result_np = max_pool1D_forward_naive(
                input_np, ksize=[2], strides=[2], paddings=[0])
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_avg_dygraph_padding_same(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = F.avg_pool1d(
                input, kernel_size=2, stride=2, padding="SAME")
            result_np = avg_pool1D_forward_naive(
                input_np, ksize=[2], strides=[2], paddings=[0])
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def test_pool1d(self):
        # Single entry point: run every check on every available place.
        for place in self.places:
            self.check_max_dygraph_results(place)
            self.check_avg_dygraph_results(place)
            self.check_max_static_results(place)
            self.check_avg_static_results(place)
            self.check_adaptive_max_dygraph_results(place)
            self.check_adaptive_avg_dygraph_results(place)
            self.check_adaptive_max_static_results(place)
            self.check_adaptive_avg_static_results(place)
            self.check_max_dygraph_padding_same(place)
            self.check_avg_dygraph_padding_same(place)
class TestPool2dError_API(unittest.TestCase):
    """Invalid-argument tests for the 1D pooling functional API.

    NOTE(review): despite ``Pool2d`` in the class name, every case below
    exercises ``F.max_pool1d`` / ``F.avg_pool1d`` — the name looks like a
    copy-paste leftover (and it also collides with the pool2d error class
    later in this dump).  Each ``runN`` closure builds an invalid call and
    is expected to raise ``ValueError``.
    """

    def test_error_api(self):
        # padding given as a nested list of the wrong shape.
        def run1():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = [[2]]
                res_pd = F.max_pool1d(
                    input_pd, kernel_size=2, stride=2, padding=padding)

        self.assertRaises(ValueError, run1)

        # same bad padding, on a 4-D input that max_pool1d cannot accept.
        def run2():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = [[2]]
                res_pd = F.max_pool1d(
                    input_pd, kernel_size=2, stride=2, padding=padding)

        self.assertRaises(ValueError, run2)

        # unrecognized padding string.
        def run3():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "padding"
                res_pd = F.max_pool1d(
                    input_pd, kernel_size=2, stride=2, padding=padding)

        self.assertRaises(ValueError, run3)

        # "VALID" padding combined with ceil_mode=True.
        def run4():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "VALID"
                res_pd = F.max_pool1d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    ceil_mode=True)

        self.assertRaises(ValueError, run4)

        # NOTE(review): run5 duplicates run4 except for the input rank.
        def run5():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "VALID"
                res_pd = F.max_pool1d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    ceil_mode=True)

        self.assertRaises(ValueError, run5)

        def run6():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "VALID"
                res_pd = F.avg_pool1d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    ceil_mode=True)

        self.assertRaises(ValueError, run6)

        # arbitrary non-keyword padding string on the avg path.
        def run7():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "paddle"
                res_pd = F.avg_pool1d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    ceil_mode=True)

        self.assertRaises(ValueError, run7)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test_pool2d_op import adaptive_start_index, adaptive_end_index, pool2D_forward_naive
import unittest
from op_test import OpTest
import numpy as np
import paddle.fluid.core as core
from paddle.nn.functional import *
import paddle.fluid as fluid
import paddle
class TestPool2d_API(unittest.TestCase):
    """API tests for 2D pooling, checked against ``pool2D_forward_naive``
    in static-graph and dygraph modes on every available place."""

    def setUp(self):
        # Fixed seed keeps the random inputs reproducible.
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_avg_static_results(self, place):
        # Static graph avg_pool2d vs the numpy reference.
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(
                name="input", shape=[2, 3, 32, 32], dtype="float32")
            result = avg_pool2d(input, kernel_size=2, stride=2, padding=0)
            input_np = np.random.random([2, 3, 32, 32]).astype("float32")
            result_np = pool2D_forward_naive(
                input_np,
                ksize=[2, 2],
                strides=[2, 2],
                paddings=[0, 0],
                pool_type='avg')
            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))

    def check_avg_dygraph_results(self, place):
        # Dygraph: functional avg_pool2d and the AvgPool2d layer must agree.
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = avg_pool2d(input, kernel_size=2, stride=2, padding=0)
            result_np = pool2D_forward_naive(
                input_np,
                ksize=[2, 2],
                strides=[2, 2],
                paddings=[0, 0],
                pool_type='avg')
            self.assertTrue(np.allclose(result.numpy(), result_np))
            avg_pool2d_dg = paddle.nn.layer.AvgPool2d(
                kernel_size=2, stride=2, padding=0)
            result = avg_pool2d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_max_static_results(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(
                name="input", shape=[2, 3, 32, 32], dtype="float32")
            result = max_pool2d(input, kernel_size=2, stride=2, padding=0)
            input_np = np.random.random([2, 3, 32, 32]).astype("float32")
            result_np = pool2D_forward_naive(
                input_np,
                ksize=[2, 2],
                strides=[2, 2],
                paddings=[0, 0],
                pool_type='max')
            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))

    def check_max_dygraph_results(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = max_pool2d(
                input, kernel_size=2, stride=2, padding=0, return_indices=False)
            result_np = pool2D_forward_naive(
                input_np,
                ksize=[2, 2],
                strides=[2, 2],
                paddings=[0, 0],
                pool_type='max')
            self.assertTrue(np.allclose(result.numpy(), result_np))
            max_pool2d_dg = paddle.nn.layer.MaxPool2d(
                kernel_size=2, stride=2, padding=0)
            result = max_pool2d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_max_dygraph_stride_is_none(self, place):
        # stride=None with return_indices=True; the reference is driven
        # with stride 2 (the kernel size) — presumably None falls back to
        # the kernel size; "SAME" on 32x32 with stride 2 adds no padding.
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result, indices = max_pool2d(
                input,
                kernel_size=2,
                stride=None,
                padding="SAME",
                return_indices=True)
            result_np = pool2D_forward_naive(
                input_np,
                ksize=[2, 2],
                strides=[2, 2],
                paddings=[0, 0],
                pool_type='max',
                padding_algorithm="SAME")
            self.assertTrue(np.allclose(result.numpy(), result_np))
            max_pool2d_dg = paddle.nn.layer.MaxPool2d(
                kernel_size=2, stride=2, padding=0)
            result = max_pool2d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_avg_dygraph_stride_is_none(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = avg_pool2d(
                input, kernel_size=2, stride=None, padding="SAME")
            result_np = pool2D_forward_naive(
                input_np,
                ksize=[2, 2],
                strides=[2, 2],
                paddings=[0, 0],
                pool_type='avg',
                padding_algorithm="SAME")
            self.assertTrue(np.allclose(result.numpy(), result_np))
            avg_pool2d_dg = paddle.nn.layer.AvgPool2d(
                kernel_size=2, stride=2, padding=0)
            result = avg_pool2d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_max_dygraph_padding(self, place):
        # explicit per-dimension [before, after] padding pairs, all zero.
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            padding = [[0, 0], [0, 0], [0, 0], [0, 0]]
            result = max_pool2d(
                input,
                kernel_size=2,
                stride=2,
                padding=padding,
                return_indices=False)
            result_np = pool2D_forward_naive(
                input_np,
                ksize=[2, 2],
                strides=[2, 2],
                paddings=[0, 0],
                pool_type='max')
            self.assertTrue(np.allclose(result.numpy(), result_np))
            max_pool2d_dg = paddle.nn.layer.MaxPool2d(
                kernel_size=2, stride=2, padding=0)
            result = max_pool2d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_avg_divisor(self, place):
        # divisor_override=4 equals the 2x2 window size, so the result
        # matches a plain average.
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            padding = [[0, 0], [0, 0], [0, 0], [0, 0]]
            result = avg_pool2d(
                input,
                kernel_size=2,
                stride=2,
                padding=padding,
                divisor_override=4)
            result_np = pool2D_forward_naive(
                input_np,
                ksize=[2, 2],
                strides=[2, 2],
                paddings=[0, 0],
                pool_type='avg')
            self.assertTrue(np.allclose(result.numpy(), result_np))
            avg_pool2d_dg = paddle.nn.layer.AvgPool2d(
                kernel_size=2, stride=2, padding=0)
            result = avg_pool2d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def test_pool2d(self):
        # Run every check on every available place.
        for place in self.places:
            self.check_max_dygraph_results(place)
            self.check_avg_dygraph_results(place)
            self.check_max_static_results(place)
            self.check_avg_static_results(place)
            self.check_max_dygraph_stride_is_none(place)
            self.check_avg_dygraph_stride_is_none(place)
            self.check_max_dygraph_padding(place)
            self.check_avg_divisor(place)
class TestPool2dError_API(unittest.TestCase):
    """Invalid-argument tests for ``avg_pool2d`` / ``max_pool2d``.

    Each ``runN`` closure builds an invalid call and is expected to raise
    ``ValueError``.
    """

    def test_error_api(self):
        # asymmetric [before, after] padding pairs on the default layout.
        def run1():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = [[0, 1], [0, 0], [0, 0], [0, 0]]
                res_pd = max_pool2d(
                    input_pd, kernel_size=2, stride=2, padding=padding)

        self.assertRaises(ValueError, run1)

        # same asymmetric padding with data_format='NHWC'.
        def run2():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = [[0, 1], [0, 0], [0, 0], [0, 0]]
                res_pd = max_pool2d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    data_format='NHWC')

        self.assertRaises(ValueError, run2)

        # unrecognized padding string on the max path.
        def run3():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "padding"
                res_pd = max_pool2d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    data_format='NHWC')

        self.assertRaises(ValueError, run3)

        # same unrecognized padding string on the avg path.
        def run3_avg():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "padding"
                res_pd = avg_pool2d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    data_format='NHWC')

        self.assertRaises(ValueError, run3_avg)

        # "VALID" combined with ceil_mode=True (max path).
        def run4():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "VALID"
                res_pd = max_pool2d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    ceil_mode=True,
                    data_format='NHWC')

        self.assertRaises(ValueError, run4)

        # "VALID" combined with ceil_mode=True (avg path).
        def run4_avg():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "VALID"
                res_pd = avg_pool2d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    ceil_mode=True,
                    data_format='NHWC')

        self.assertRaises(ValueError, run4_avg)

        # NOTE(review): run5 duplicates run3_avg.
        def run5():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "padding"
                res_pd = avg_pool2d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    data_format='NHWC')

        self.assertRaises(ValueError, run5)

        # NOTE(review): run6 duplicates run4_avg.
        def run6():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "VALID"
                res_pd = avg_pool2d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    ceil_mode=True,
                    data_format='NHWC')

        self.assertRaises(ValueError, run6)

        # unknown data_format string (avg path).
        def run7():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "VALID"
                res_pd = avg_pool2d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    ceil_mode=False,
                    data_format='NNNN')

        self.assertRaises(ValueError, run7)

        # unknown data_format string (max path).
        def run8():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(-1, 1,
                                             [2, 3, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = "VALID"
                res_pd = max_pool2d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    ceil_mode=False,
                    data_format='NNNN')

        self.assertRaises(ValueError, run8)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import division
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
from paddle.nn.functional import avg_pool3d, max_pool3d
from test_pool3d_op import adaptive_start_index, adaptive_end_index, pool3D_forward_naive
class TestPool3d_API(unittest.TestCase):
    """API tests for 3D pooling, checked against ``pool3D_forward_naive``
    in static-graph and dygraph modes on every available place."""

    def setUp(self):
        # Fixed seed keeps the random inputs reproducible.
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_avg_static_results(self, place):
        # Static graph avg_pool3d vs the numpy reference.
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(
                name="input", shape=[2, 3, 32, 32, 32], dtype="float32")
            result = avg_pool3d(input, kernel_size=2, stride=2, padding=0)
            input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32")
            result_np = pool3D_forward_naive(
                input_np,
                ksize=[2, 2, 2],
                strides=[2, 2, 2],
                paddings=[0, 0, 0],
                pool_type='avg')
            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))

    def check_avg_dygraph_results(self, place):
        # "SAME" with kernel 2 / stride 2 on size-32 dims adds no padding,
        # so the zero-padding reference still matches.
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = avg_pool3d(input, kernel_size=2, stride=2, padding="SAME")
            result_np = pool3D_forward_naive(
                input_np,
                ksize=[2, 2, 2],
                strides=[2, 2, 2],
                paddings=[0, 0, 0],
                pool_type='avg',
                padding_algorithm="SAME")
            self.assertTrue(np.allclose(result.numpy(), result_np))
            avg_pool3d_dg = paddle.nn.layer.AvgPool3d(
                kernel_size=2, stride=None, padding="SAME")
            result = avg_pool3d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_max_static_results(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(
                name="input", shape=[2, 3, 32, 32, 32], dtype="float32")
            result = max_pool3d(input, kernel_size=2, stride=2, padding=0)
            input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32")
            result_np = pool3D_forward_naive(
                input_np,
                ksize=[2, 2, 2],
                strides=[2, 2, 2],
                paddings=[0, 0, 0],
                pool_type='max')
            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))

    def check_max_dygraph_results(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = max_pool3d(input, kernel_size=2, stride=2, padding=0)
            result_np = pool3D_forward_naive(
                input_np,
                ksize=[2, 2, 2],
                strides=[2, 2, 2],
                paddings=[0, 0, 0],
                pool_type='max')
            self.assertTrue(np.allclose(result.numpy(), result_np))
            max_pool3d_dg = paddle.nn.layer.MaxPool3d(
                kernel_size=2, stride=None, padding=0)
            result = max_pool3d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_max_dygraph_stride_is_none(self, place):
        # stride=None with return_indices=True; the reference runs with
        # stride 2 (the kernel size) — presumably None falls back to it.
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result, indices = max_pool3d(
                input,
                kernel_size=2,
                stride=None,
                padding="SAME",
                return_indices=True)
            result_np = pool3D_forward_naive(
                input_np,
                ksize=[2, 2, 2],
                strides=[2, 2, 2],
                paddings=[0, 0, 0],
                pool_type='max',
                padding_algorithm="SAME")
            self.assertTrue(np.allclose(result.numpy(), result_np))
            max_pool3d_dg = paddle.nn.layer.MaxPool3d(
                kernel_size=2, stride=2, padding=0)
            result = max_pool3d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_max_dygraph_padding(self, place):
        # Both padding spellings (per-dim pairs and flat 6-list), all zeros.
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            padding = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
            result = max_pool3d(input, kernel_size=2, stride=2, padding=padding)
            result_np = pool3D_forward_naive(
                input_np,
                ksize=[2, 2, 2],
                strides=[2, 2, 2],
                paddings=[0, 0, 0],
                pool_type='max')
            self.assertTrue(np.allclose(result.numpy(), result_np))
            max_pool3d_dg = paddle.nn.layer.MaxPool3d(
                kernel_size=2, stride=2, padding=0)
            result = max_pool3d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))
            padding = [0, 0, 0, 0, 0, 0]
            result = max_pool3d(input, kernel_size=2, stride=2, padding=padding)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def check_avg_divisor(self, place):
        # divisor_override=8 equals the 2x2x2 window volume, so results
        # match a plain average.
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            padding = 0
            result = avg_pool3d(
                input,
                kernel_size=2,
                stride=2,
                padding=padding,
                divisor_override=8)
            result_np = pool3D_forward_naive(
                input_np,
                ksize=[2, 2, 2],
                strides=[2, 2, 2],
                paddings=[0, 0, 0],
                pool_type='avg')
            self.assertTrue(np.allclose(result.numpy(), result_np))
            avg_pool3d_dg = paddle.nn.layer.AvgPool3d(
                kernel_size=2, stride=2, padding=0)
            result = avg_pool3d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))
            padding = [0, 0, 0, 0, 0, 0]
            result = avg_pool3d(
                input,
                kernel_size=2,
                stride=2,
                padding=padding,
                divisor_override=8)
            self.assertTrue(np.allclose(result.numpy(), result_np))

    def test_pool3d(self):
        # Run every check on every available place.
        for place in self.places:
            self.check_max_dygraph_results(place)
            self.check_avg_dygraph_results(place)
            self.check_max_static_results(place)
            self.check_avg_static_results(place)
            self.check_max_dygraph_stride_is_none(place)
            self.check_max_dygraph_padding(place)
            self.check_avg_divisor(place)
class TestPool3dError_API(unittest.TestCase):
    """Invalid-argument tests for ``avg_pool3d`` / ``max_pool3d``.

    Each ``runN`` closure builds an invalid call and is expected to raise
    ``ValueError``.
    """

    def test_error_api(self):
        # asymmetric [before, after] padding pairs, default layout.
        def run1():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(
                    -1, 1, [2, 3, 32, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = [[0, 1], [0, 0], [0, 0], [0, 0], [0, 0]]
                res_pd = avg_pool3d(
                    input_pd, kernel_size=2, stride=2, padding=padding)

        self.assertRaises(ValueError, run1)

        # asymmetric pairs with explicit data_format='NCDHW'.
        def run2():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(
                    -1, 1, [2, 3, 32, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = [[0, 1], [0, 0], [0, 0], [0, 0], [0, 0]]
                res_pd = avg_pool3d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    data_format='NCDHW')

        self.assertRaises(ValueError, run2)

        # asymmetric pairs with data_format='NDHWC'.
        def run3():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(
                    -1, 1, [2, 3, 32, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                padding = [[0, 1], [0, 0], [0, 0], [0, 0], [0, 0]]
                res_pd = avg_pool3d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=padding,
                    data_format='NDHWC')

        self.assertRaises(ValueError, run3)

        # unknown data_format string (avg path).
        def run4():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(
                    -1, 1, [2, 3, 32, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                res_pd = avg_pool3d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=0,
                    data_format='NNNN')

        self.assertRaises(ValueError, run4)

        # unknown data_format string (max path).
        def run5():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(
                    -1, 1, [2, 3, 32, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                res_pd = max_pool3d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding=0,
                    data_format='NNNN')

        self.assertRaises(ValueError, run5)

        # invalid padding string plus bad data_format (avg path).
        def run6():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(
                    -1, 1, [2, 3, 32, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                res_pd = avg_pool3d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding="padding",
                    data_format='NNNN')

        self.assertRaises(ValueError, run6)

        # invalid padding string plus bad data_format (max path).
        def run7():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(
                    -1, 1, [2, 3, 32, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                res_pd = max_pool3d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding="padding",
                    data_format='NNNN')

        self.assertRaises(ValueError, run7)

        # "VALID" combined with ceil_mode=True (avg path).
        def run8():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(
                    -1, 1, [2, 3, 32, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                res_pd = avg_pool3d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding="VALID",
                    ceil_mode=True,
                    data_format='NNNN')

        self.assertRaises(ValueError, run8)

        # "VALID" combined with ceil_mode=True (max path).
        def run9():
            with fluid.dygraph.guard():
                input_np = np.random.uniform(
                    -1, 1, [2, 3, 32, 32, 32]).astype(np.float32)
                input_pd = fluid.dygraph.to_variable(input_np)
                res_pd = max_pool3d(
                    input_pd,
                    kernel_size=2,
                    stride=2,
                    padding="VALID",
                    ceil_mode=True,
                    data_format='NNNN')

        self.assertRaises(ValueError, run9)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
......@@ -25,6 +25,8 @@ from . import extension
__all__ += extension.__all__
from . import common
__all__ += common.__all__
from . import pooling
__all__ += pooling.__all__
from . import loss
__all__ += loss.__all__
from .activation import brelu #DEFINE_ALIAS
......@@ -166,10 +168,18 @@ from .norm import l2_normalize #DEFINE_ALIAS
from .norm import lrn #DEFINE_ALIAS
from .norm import normalize #DEFINE_ALIAS
# from .norm import spectral_norm #DEFINE_ALIAS
from .pooling import max_pool1d #DEFINE_ALIAS
from .pooling import avg_pool1d #DEFINE_ALIAS
from .pooling import adaptive_max_pool1d #DEFINE_ALIAS
from .pooling import adaptive_avg_pool1d #DEFINE_ALIAS
from .pooling import pool2d #DEFINE_ALIAS
from .pooling import pool3d #DEFINE_ALIAS
from .pooling import adaptive_pool2d #DEFINE_ALIAS
from .pooling import adaptive_pool3d #DEFINE_ALIAS
from .pooling import avg_pool2d #DEFINE_ALIAS
from .pooling import max_pool2d #DEFINE_ALIAS
from .pooling import avg_pool3d #DEFINE_ALIAS
from .pooling import max_pool3d #DEFINE_ALIAS
from .pooling import adaptive_avg_pool2d #DEFINE_ALIAS
from .pooling import adaptive_avg_pool3d #DEFINE_ALIAS
# from .rnn import gru_unit #DEFINE_ALIAS
......
......@@ -60,6 +60,14 @@ from .common import Dropout3D #DEFINE_ALIAS
from .common import AlphaDropout #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool2d #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool3d #DEFINE_ALIAS
from .pooling import AvgPool1d #DEFINE_ALIAS
from .pooling import MaxPool1d #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool1d #DEFINE_ALIAS
from .pooling import AdaptiveMaxPool1d #DEFINE_ALIAS
from .pooling import AvgPool2d #DEFINE_ALIAS
from .pooling import MaxPool2d #DEFINE_ALIAS
from .pooling import AvgPool3d #DEFINE_ALIAS
from .pooling import MaxPool3d #DEFINE_ALIAS
from .conv import Conv1d #DEFINE_ALIAS
from .conv import Conv2d #DEFINE_ALIAS
from .conv import Conv3d #DEFINE_ALIAS
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the common classes to build a neural network
# TODO: define the common classes to build a neural network
from ...fluid.dygraph import BilinearTensorProduct #DEFINE_ALIAS
from ...fluid.dygraph import Pool2D #DEFINE_ALIAS
from ...fluid.dygraph import Embedding #DEFINE_ALIAS
......@@ -583,8 +583,8 @@ class ReflectionPad1d(layers.Layer):
Default is "NCL"
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Returns:
None
Examples:
......@@ -642,8 +642,8 @@ class ReplicationPad1d(layers.Layer):
Default is "NCL"
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Returns:
None
Examples:
......@@ -657,7 +657,7 @@ class ReplicationPad1d(layers.Layer):
Code Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import numpy as np
......@@ -702,8 +702,8 @@ class ConstantPad1d(layers.Layer):
Default is "NCL"
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Returns:
None
Examples:
......@@ -718,7 +718,7 @@ class ConstantPad1d(layers.Layer):
Code Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import numpy as np
......@@ -765,8 +765,8 @@ class ConstantPad2d(layers.Layer):
Default is "NCHW"
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Returns:
None
Examples:
......@@ -781,7 +781,7 @@ class ConstantPad2d(layers.Layer):
Code Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import numpy as np
......@@ -830,8 +830,8 @@ class ZeroPad2d(layers.Layer):
Default is "NCHW"
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Returns:
None
Examples:
......@@ -845,7 +845,7 @@ class ZeroPad2d(layers.Layer):
Code Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import numpy as np
......@@ -892,8 +892,8 @@ class ReplicationPad2d(layers.Layer):
Default is "NCHW"
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Returns:
None
Examples:
......@@ -907,7 +907,7 @@ class ReplicationPad2d(layers.Layer):
Code Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import numpy as np
......@@ -954,8 +954,8 @@ class ReflectionPad2d(layers.Layer):
Default is "NCHW"
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Returns:
None
Examples:
......@@ -969,7 +969,7 @@ class ReflectionPad2d(layers.Layer):
Code Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import numpy as np
......@@ -1019,8 +1019,8 @@ class ConstantPad3d(layers.Layer):
Default is "NCDHW"
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Returns:
None
Examples:
......@@ -1035,7 +1035,7 @@ class ConstantPad3d(layers.Layer):
Code Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import numpy as np
......@@ -1084,8 +1084,8 @@ class ReplicationPad3d(layers.Layer):
Default is "NCDHW"
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Returns:
None
Examples:
......@@ -1099,7 +1099,7 @@ class ReplicationPad3d(layers.Layer):
Code Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import numpy as np
......@@ -1141,7 +1141,7 @@ class CosineSimilarity(layers.Layer):
Parameters:
axis (int): Dimension of vectors to compute cosine similarity. Default is 1.
eps(float): Small value to avoid division by zero. Default is 1e-8.
Returns:
Returns:
None
Examples:
......@@ -1162,7 +1162,7 @@ class CosineSimilarity(layers.Layer):
Code Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import numpy as np
......
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册