Unverified commit 519cc7b0, authored by wangguanzhong, committed by GitHub

split conv2d_op unittest (#33231)

Parent 06c63ca0
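This commit splits the conv2d unit tests into three modules: the core operator checks stay in test_conv2d_op, the depthwise variants move to test_conv2d_op_depthwise_conv, and the Python API checks move to test_conv2d_api. As a minimal illustrative sketch (not part of this diff), the depthwise case covered by the new API test reduces to a grouped convolution whose group count equals the input channel count:

import paddle

# Illustrative sketch, not part of the commit: a depthwise conv2d is a grouped
# convolution with groups == in_channels, i.e. one 3x3 filter per input channel.
x = paddle.uniform((2, 8, 8, 4), dtype='float32', min=-1., max=1.)  # NHWC input
conv = paddle.nn.Conv2D(
    in_channels=4, out_channels=4, kernel_size=(3, 3), groups=4,
    data_format='NHWC')
y = conv(x)
print(y.shape)  # [2, 6, 6, 4] with the default zero padding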
@@ -477,6 +477,8 @@ py_test_modules(test_imperative_static_runner_mnist MODULES test_imperative_stat
py_test_modules(test_imperative_static_runner_while MODULES test_imperative_static_runner_while ENVS
FLAGS_cudnn_deterministic=1)
set_tests_properties(test_conv2d_op PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE")
set_tests_properties(test_conv2d_op_depthwise_conv PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE")
set_tests_properties(test_conv2d_api PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE")
if(WITH_DISTRIBUTE)
# FIXME(typhoonzero): add these tests back
list(REMOVE_ITEM DIST_TEST_OPS "test_dist_transformer")
@@ -838,6 +840,8 @@ set_tests_properties(test_bilinear_interp_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_decoupled_py_reader PROPERTIES TIMEOUT 120)
set_tests_properties(test_fuse_bn_act_pass PROPERTIES TIMEOUT 120)
set_tests_properties(test_conv2d_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_conv2d_op_depthwise_conv PROPERTIES TIMEOUT 120)
set_tests_properties(test_conv2d_api PROPERTIES TIMEOUT 120)
set_tests_properties(test_elementwise_mul_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_cyclic_cifar_dataset PROPERTIES TIMEOUT 120)
set_tests_properties(test_fuse_all_reduce_pass PROPERTIES TIMEOUT 120)
......
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
paddle.enable_static()
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test import OpTest
from paddle.fluid import Program, program_guard
class TestConv2DAPI(unittest.TestCase):
def test_api(self):
input_NHWC = fluid.layers.data(
name="input_NHWC",
shape=[2, 5, 5, 3],
append_batch_size=False,
dtype="float32")
input_NCHW = fluid.layers.data(
name="input_NCHW",
shape=[2, 3, 5, 5],
append_batch_size=False,
dtype="float32")
fluid.layers.conv2d(
input=input_NHWC,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=1,
data_format="NCHW")
fluid.layers.conv2d(
input=input_NCHW,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[1, 2, 1, 0],
dilation=[1, 1],
groups=1,
data_format="NCHW")
fluid.layers.conv2d(
input=input_NCHW,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[[0, 0], [0, 0], [1, 1], [1, 1]],
dilation=[1, 1],
groups=1,
data_format="NCHW")
fluid.layers.conv2d(
input=input_NHWC,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
dilation=[1, 1],
groups=1,
data_format="NHWC")
fluid.layers.conv2d(
input=input_NCHW,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding="SAME",
dilation=[1, 1],
groups=1,
data_format="NCHW")
fluid.layers.conv2d(
input=input_NCHW,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding="VALID",
dilation=[1, 1],
groups=1,
data_format="NCHW")
def test_depthwise_conv2d(self):
x_var = paddle.uniform((2, 8, 8, 4), dtype='float32', min=-1., max=1.)
conv = paddle.nn.Conv2D(
in_channels=4,
out_channels=4,
kernel_size=(3, 3),
groups=4,
data_format='NHWC')
y_var = conv(x_var)
class TestConv2DAPI_Error(unittest.TestCase):
def test_api(self):
input = fluid.layers.data(
name="input",
shape=[2, 5, 5, 5],
append_batch_size=False,
dtype="float32")
# ValueError: cudnn
def run_1():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=1,
use_cudnn=[0],
data_format="NCHW")
self.assertRaises(ValueError, run_1)
# ValueError: data_format
def run_2():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=1,
use_cudnn=False,
data_format="NCHWC")
self.assertRaises(ValueError, run_2)
# ValueError: padding
def run_3():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding="SAMEE",
dilation=[1, 1],
groups=1,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_3)
def run_4():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[[0, 1], [0, 1], [0, 1], [0, 1]],
dilation=[1, 1],
groups=1,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_4)
def run_5():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[[0, 1], [0, 1], [0, 1], [0, 1]],
dilation=[1, 1],
groups=1,
use_cudnn=False,
data_format="NHWC")
self.assertRaises(ValueError, run_5)
# ValueError: channel dimension
x = fluid.layers.data(
name="x",
shape=[2, 5, 5, -1],
append_batch_size=False,
dtype="float32")
def run_6():
fluid.layers.conv2d(
input=x,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=1,
use_cudnn=False,
data_format="NHWC")
self.assertRaises(ValueError, run_6)
# ValueError: groups
def run_7():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=3,
use_cudnn=False,
data_format="NHWC")
self.assertRaises(ValueError, run_7)
# ValueError: filter num
def run_8():
fluid.layers.conv2d(
input=input,
num_filters=0,
filter_size=0,
stride=0,
padding=0,
dilation=0,
groups=1,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_8)
# ValueError: groups
def run_9():
fluid.layers.conv2d(
input=input,
num_filters=0,
filter_size=0,
stride=0,
padding=0,
dilation=0,
groups=0,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_9)
# ValueError: stride
def run_10():
fluid.layers.conv2d(
input=input,
num_filters=1,
filter_size=1,
stride=0,
padding=0,
dilation=0,
groups=1,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_10)
def test_api_with_error_input(self):
input = fluid.layers.data(
name="error_input",
shape=[1],
append_batch_size=False,
dtype="float32")
# ValueError: invalid input dimension
def run_1():
fluid.layers.conv2d(
input=input,
num_filters=0,
filter_size=0,
stride=0,
padding=0,
dilation=0,
groups=0,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_1)
# --------- test environment variable ------
@unittest.skipIf(
not (core.is_compiled_with_cuda() or core.is_compiled_with_rocm()),
"core is not compiled with CUDA or ROCM")
class TestConv2DEnviron(unittest.TestCase):
def run1(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
inputs = fluid.layers.data(
shape=[2, 3, 5, 5],
append_batch_size=False,
name="inputs",
dtype="float32")
result = fluid.layers.conv2d(
input=inputs,
num_filters=4,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=1,
data_format="NCHW")
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
fetches = exe.run(fluid.default_main_program(),
feed={"inputs": self.input_np},
fetch_list=[result])
def run2(self, place):
with fluid.dygraph.guard(place):
inputs = fluid.dygraph.to_variable(self.input_np)
conv = paddle.nn.Conv2D(
in_channels=3,
out_channels=4,
kernel_size=(3, 3),
data_format="NCHW")
result = conv(inputs)
def run3(self, place):
with fluid.dygraph.guard(place):
inputs = fluid.dygraph.to_variable(self.input_np)
conv = paddle.fluid.dygraph.nn.Conv2D(
num_channels=3,
num_filters=4,
filter_size=(3, 3), )
result = conv(inputs)
def run_all(self, place):
self.run1(place)
self.run2(place)
self.run3(place)
def test_environ(self):
self.input_np = np.random.random([2, 3, 5, 5]).astype("float32")
for place in [paddle.CPUPlace(), paddle.CUDAPlace(0)]:
fluid.set_flags({'FLAGS_conv2d_disable_cudnn': False})
self.run_all(place)
fluid.set_flags({'FLAGS_conv2d_disable_cudnn': True})
self.run_all(place)
if __name__ == '__main__':
unittest.main()
@@ -554,147 +554,6 @@ create_test_cudnn_fp16_class(TestWithGroup, grad_check=False)
create_test_cudnn_fp16_class(TestWith1x1, grad_check=False)
create_test_cudnn_fp16_class(TestWithInput1x1Filter1x1, grad_check=False)
#----------------TestDepthwiseConv -----
class TestDepthwiseConv(TestConv2DOp):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv2(TestConv2DOp):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv3(TestConv2DOp):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation(TestConv2DOp):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation2(TestConv2DOp):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConvandFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv2andFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv3andFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilationandFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation2andFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestCUDNNExhaustiveSearch(TestConv2DOp):
def init_kernel_type(self):
@@ -1016,183 +875,6 @@ create_test_cudnn_class(TestWithGroup_AsyPadding)
create_test_cudnn_class(TestWith1x1_AsyPadding)
create_test_cudnn_class(TestWithInput1x1Filter1x1_AsyPadding)
class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.use_cuda = True
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 1, 0, 1]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.use_cuda = True
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [0, 1, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.use_cuda = True
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 1, 0, 0]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 1, 2, 1]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [0, 1, 1, 0]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [2, 1, 2, 3]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 1, 1, 2]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv3andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 2, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [2, 1, 1, 0]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 3, 1, 3]
self.padding_algorithm = "EXPLICIT"
#---------- test SAME VALID -----------
create_test_padding_SAME_class(TestConv2DOp_AsyPadding)
create_test_padding_SAME_class(TestWithPad_AsyPadding)
@@ -1218,18 +900,6 @@ create_test_cudnn_padding_VALID_class(TestWithStride_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithGroup_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding)
# depthwise conv2d
create_test_padding_SAME_class(TestDepthwiseConv_AsyPadding)
create_test_padding_SAME_class(TestDepthwiseConvWithDilation_AsyPadding)
create_test_padding_SAME_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_padding_SAME_class(TestDepthwiseConvWithDilationandFuse_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConv_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConvWithDilation_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConvWithDilationandFuse_AsyPadding)
# ------------ test channel last ---------
create_test_channel_last_class(TestConv2DOp_AsyPadding)
create_test_channel_last_class(TestWithPad_AsyPadding)
@@ -1237,28 +907,12 @@ create_test_channel_last_class(TestWithGroup_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding)
create_test_channel_last_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_channel_last_class(TestDepthwiseConv_AsyPadding)
create_test_channel_last_class(TestDepthwiseConvWithDilation2_AsyPadding)
create_test_channel_last_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_channel_last_class(TestDepthwiseConvWithDilationandFuse_AsyPadding)
create_test_cudnn_channel_last_class(TestConv2DOp_AsyPadding)
create_test_cudnn_channel_last_class(TestWithPad_AsyPadding)
create_test_cudnn_channel_last_class(TestWithStride_AsyPadding)
create_test_cudnn_channel_last_class(TestWithGroup_AsyPadding)
create_test_cudnn_channel_last_class(TestWithDilation_AsyPadding)
# ------------ depthwise conv2d in MIOPEN ---------
if core.is_compiled_with_rocm():
create_test_cudnn_padding_SAME_class(TestDepthwiseConv_AsyPadding)
create_test_cudnn_padding_SAME_class(
TestDepthwiseConvWithDilation_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConv_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConvWithDilation_AsyPadding)
create_test_cudnn_channel_last_class(TestDepthwiseConv_AsyPadding)
create_test_cudnn_channel_last_class(
TestDepthwiseConvWithDilation2_AsyPadding)
create_test_cudnn_channel_last_fp16_class(
TestConv2DOp_AsyPadding, grad_check=False)
create_test_cudnn_channel_last_fp16_class(
@@ -1270,338 +924,5 @@ create_test_cudnn_channel_last_fp16_class(
create_test_cudnn_channel_last_fp16_class(
TestWithDilation_AsyPadding, grad_check=False)
# --------- test python API ---------------
class TestConv2DAPI(unittest.TestCase):
def test_api(self):
input_NHWC = fluid.layers.data(
name="input_NHWC",
shape=[2, 5, 5, 3],
append_batch_size=False,
dtype="float32")
input_NCHW = fluid.layers.data(
name="input_NCHW",
shape=[2, 3, 5, 5],
append_batch_size=False,
dtype="float32")
fluid.layers.conv2d(
input=input_NHWC,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=1,
data_format="NCHW")
fluid.layers.conv2d(
input=input_NCHW,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[1, 2, 1, 0],
dilation=[1, 1],
groups=1,
data_format="NCHW")
fluid.layers.conv2d(
input=input_NCHW,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[[0, 0], [0, 0], [1, 1], [1, 1]],
dilation=[1, 1],
groups=1,
data_format="NCHW")
fluid.layers.conv2d(
input=input_NHWC,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
dilation=[1, 1],
groups=1,
data_format="NHWC")
fluid.layers.conv2d(
input=input_NCHW,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding="SAME",
dilation=[1, 1],
groups=1,
data_format="NCHW")
fluid.layers.conv2d(
input=input_NCHW,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding="VALID",
dilation=[1, 1],
groups=1,
data_format="NCHW")
def test_depthwise_conv2d(self):
x_var = paddle.uniform((2, 8, 8, 4), dtype='float32', min=-1., max=1.)
conv = paddle.nn.Conv2D(
in_channels=4,
out_channels=4,
kernel_size=(3, 3),
groups=4,
data_format='NHWC')
y_var = conv(x_var)
class TestConv2DAPI_Error(unittest.TestCase):
def test_api(self):
input = fluid.layers.data(
name="input",
shape=[2, 5, 5, 5],
append_batch_size=False,
dtype="float32")
# ValueError: cudnn
def run_1():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=1,
use_cudnn=[0],
data_format="NCHW")
self.assertRaises(ValueError, run_1)
# ValueError: data_format
def run_2():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=1,
use_cudnn=False,
data_format="NCHWC")
self.assertRaises(ValueError, run_2)
# ValueError: padding
def run_3():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding="SAMEE",
dilation=[1, 1],
groups=1,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_3)
def run_4():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[[0, 1], [0, 1], [0, 1], [0, 1]],
dilation=[1, 1],
groups=1,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_4)
def run_5():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[[0, 1], [0, 1], [0, 1], [0, 1]],
dilation=[1, 1],
groups=1,
use_cudnn=False,
data_format="NHWC")
self.assertRaises(ValueError, run_5)
# ValueError: channel dimension
x = fluid.layers.data(
name="x",
shape=[2, 5, 5, -1],
append_batch_size=False,
dtype="float32")
def run_6():
fluid.layers.conv2d(
input=x,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=1,
use_cudnn=False,
data_format="NHWC")
self.assertRaises(ValueError, run_6)
# ValueError: groups
def run_7():
fluid.layers.conv2d(
input=input,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=3,
use_cudnn=False,
data_format="NHWC")
self.assertRaises(ValueError, run_7)
# ValueError: filter num
def run_8():
fluid.layers.conv2d(
input=input,
num_filters=0,
filter_size=0,
stride=0,
padding=0,
dilation=0,
groups=1,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_8)
# ValueError: groups
def run_9():
fluid.layers.conv2d(
input=input,
num_filters=0,
filter_size=0,
stride=0,
padding=0,
dilation=0,
groups=0,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_9)
# ValueError: stride
def run_10():
fluid.layers.conv2d(
input=input,
num_filters=1,
filter_size=1,
stride=0,
padding=0,
dilation=0,
groups=1,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_10)
def test_api_with_error_input(self):
input = fluid.layers.data(
name="error_input",
shape=[1],
append_batch_size=False,
dtype="float32")
# ValueError: invalid input dimension
def run_1():
fluid.layers.conv2d(
input=input,
num_filters=0,
filter_size=0,
stride=0,
padding=0,
dilation=0,
groups=0,
use_cudnn=False,
data_format="NCHW")
self.assertRaises(ValueError, run_1)
# --------- test environment variable ------
@unittest.skipIf(
not (core.is_compiled_with_cuda() or core.is_compiled_with_rocm()),
"core is not compiled with CUDA or ROCM")
class TestConv2DEnviron(unittest.TestCase):
def run1(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
inputs = fluid.layers.data(
shape=[2, 3, 5, 5],
append_batch_size=False,
name="inputs",
dtype="float32")
result = fluid.layers.conv2d(
input=inputs,
num_filters=4,
filter_size=[3, 3],
stride=[1, 1],
padding=0,
dilation=[1, 1],
groups=1,
data_format="NCHW")
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
fetches = exe.run(fluid.default_main_program(),
feed={"inputs": self.input_np},
fetch_list=[result])
def run2(self, place):
with fluid.dygraph.guard(place):
inputs = fluid.dygraph.to_variable(self.input_np)
conv = paddle.nn.Conv2D(
in_channels=3,
out_channels=4,
kernel_size=(3, 3),
data_format="NCHW")
result = conv(inputs)
def run3(self, place):
with fluid.dygraph.guard(place):
inputs = fluid.dygraph.to_variable(self.input_np)
conv = paddle.fluid.dygraph.nn.Conv2D(
num_channels=3,
num_filters=4,
filter_size=(3, 3), )
result = conv(inputs)
def run_all(self, place):
self.run1(place)
self.run2(place)
self.run3(place)
def test_environ(self):
self.input_np = np.random.random([2, 3, 5, 5]).astype("float32")
for place in [paddle.CPUPlace(), paddle.CUDAPlace(0)]:
fluid.set_flags({'FLAGS_conv2d_disable_cudnn': False})
self.run_all(place)
fluid.set_flags({'FLAGS_conv2d_disable_cudnn': True})
self.run_all(place)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
paddle.enable_static()
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test import OpTest
from paddle.fluid import Program, program_guard
from test_conv2d_op import TestConv2DOp, TestConv2DOp_v2, create_test_padding_SAME_class, create_test_padding_VALID_class, create_test_channel_last_class, create_test_cudnn_padding_SAME_class, create_test_cudnn_channel_last_class
#----------------TestDepthwiseConv -----
class TestDepthwiseConv(TestConv2DOp):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv2(TestConv2DOp):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv3(TestConv2DOp):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation(TestConv2DOp):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation2(TestConv2DOp):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConvandFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv2andFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv3andFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilationandFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation2andFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.use_cuda = True
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 1, 0, 1]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.use_cuda = True
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [0, 1, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.use_cuda = True
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 1, 0, 0]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 1, 2, 1]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [0, 1, 1, 0]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [2, 1, 2, 3]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 1, 1, 2]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv3andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 2, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [2, 1, 1, 0]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
self.dilations = [2, 2]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 3, 1, 3]
self.padding_algorithm = "EXPLICIT"
# depthwise conv2d
create_test_padding_SAME_class(TestDepthwiseConv_AsyPadding)
create_test_padding_SAME_class(TestDepthwiseConvWithDilation_AsyPadding)
create_test_padding_SAME_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_padding_SAME_class(TestDepthwiseConvWithDilationandFuse_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConv_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConvWithDilation_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConvWithDilationandFuse_AsyPadding)
# channel last
create_test_channel_last_class(TestDepthwiseConv_AsyPadding)
create_test_channel_last_class(TestDepthwiseConvWithDilation2_AsyPadding)
create_test_channel_last_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_channel_last_class(TestDepthwiseConvWithDilationandFuse_AsyPadding)
# ------------ depthwise conv2d in MIOPEN ---------
if core.is_compiled_with_rocm():
create_test_cudnn_padding_SAME_class(TestDepthwiseConv_AsyPadding)
create_test_cudnn_padding_SAME_class(
TestDepthwiseConvWithDilation_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConv_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConvWithDilation_AsyPadding)
create_test_cudnn_channel_last_class(TestDepthwiseConv_AsyPadding)
create_test_cudnn_channel_last_class(
TestDepthwiseConvWithDilation2_AsyPadding)
if __name__ == '__main__':
unittest.main()
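The two new modules can also be run standalone; a hypothetical driver (assuming the Paddle unittests directory, which provides the op_test helper imported above, is on PYTHONPATH) would be:

import unittest

# Hypothetical standalone runner; the module names match the CMake test
# targets added above (test_conv2d_api, test_conv2d_op_depthwise_conv).
suite = unittest.defaultTestLoader.loadTestsFromNames(
    ["test_conv2d_api", "test_conv2d_op_depthwise_conv"])
unittest.TextTestRunner(verbosity=2).run(suite)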