# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import sys

sys.path.append("..")

import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test import OpTest
from test_conv2d_op import conv2d_forward_naive

paddle.enable_static()


def create_test_channel_last_class(parent):
    class TestChannelLastCase(parent):
        def init_data_format(self):
            self.data_format = "NHWC"

        def init_test_case_2(self):
            N, C, H, W = self.input_size
            self.input_size = [N, H, W, C]

    cls_name = "{0}_{1}".format(parent.__name__, "ChannelLast")
    TestChannelLastCase.__name__ = cls_name
    globals()[cls_name] = TestChannelLastCase


def create_test_padding_SAME_class(parent):
    class TestPaddingSAMECase(parent):
        def init_paddings(self):
            self.pad = [0, 0]
            self.padding_algorithm = "SAME"

    cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
    TestPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestPaddingSAMECase


def create_test_padding_VALID_class(parent):
    class TestPaddingVALIDCase(parent):
        def init_paddings(self):
            self.pad = [1, 1]
            self.padding_algorithm = "VALID"

    cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp")
    TestPaddingVALIDCase.__name__ = cls_name
    globals()[cls_name] = TestPaddingVALIDCase


def create_test_fp16_class(parent):
    class TestFp16Case(parent):
        def init_dtype(self):
            self.dtype = np.float16

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
    TestFp16Case.__name__ = cls_name
    globals()[cls_name] = TestFp16Case


class TestConv2DOp(OpTest):
    def set_mlu(self):
        self.__class__.use_mlu = True
        self.place = paddle.device.MLUPlace(0)

    def init_dtype(self):
        self.dtype = np.float32

    def init_data_format(self):
        self.data_format = "NCHW"

    def setUp(self):
        self.set_mlu()
        self.op_type = "conv2d"
        self.init_data_format()
        self.init_dtype()
        self.init_group()
        self.init_dilation()
        self.init_test_case()

        conv2d_param = {
            'stride': self.stride,
            'pad': self.pad,
            'dilation': self.dilations
        }

        input = np.random.random(self.input_size).astype(self.dtype)
        filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype)

        output, _, _, _, _ = conv2d_forward_naive(
            input,
            filter,
            self.groups,
            conv2d_param,
            data_format=self.data_format)
        output = output.astype(self.dtype)

        self.inputs = {
            'Input': OpTest.np_dtype_to_fluid_dtype(input),
            'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
        }
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'groups': self.groups,
            'dilations': self.dilations,
            'data_format': self.data_format,
        }
        self.outputs = {'Output': output}

    def test_check_output(self):
        self.check_output_with_place(self.place, atol=1e-2)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad_with_place(
            self.place, ['Input', 'Filter'],
            'Output',
            max_relative_error=0.03,
            numeric_place=paddle.CPUPlace())

    def test_check_grad_no_filter(self):
        if self.dtype == np.float16:
            return
        self.check_grad_with_place(
            self.place, ['Input'],
            'Output',
            max_relative_error=0.03,
            no_grad_set=set(['Filter']),
            numeric_place=paddle.CPUPlace())

    def test_check_grad_no_input(self):
        if self.dtype == np.float16:
            return
        self.check_grad_with_place(
            self.place, ['Filter'],
            'Output',
            max_relative_error=0.03,
            no_grad_set=set(['Input']),
            numeric_place=paddle.CPUPlace())

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]

    def init_dilation(self):
        self.dilations = [1, 1]

    def init_group(self):
        self.groups = 1
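

# The subclasses below only override the init_* hooks consumed by setUp().
# For reference, the spatial output size checked by these cases follows the
# usual convolution arithmetic. A minimal sketch (illustrative only;
# `conv_out_size` is a hypothetical name, not a helper used by the tests):
#
#     def conv_out_size(in_size, k, pad, stride, dilation):
#         k_eff = dilation * (k - 1) + 1  # effective kernel extent
#         return (in_size + 2 * pad - k_eff) // stride + 1
#
# e.g. the default case: conv_out_size(5, 3, 0, 1, 1) == 3, so a [2, 3, 5, 5]
# input convolved with a [6, 3, 3, 3] filter yields a [2, 6, 3, 3] output.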


class TestWithPad(TestConv2DOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]


class TestWithStride(TestConv2DOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.input_size = [2, 3, 6, 6]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]


class TestWithGroup(TestConv2DOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        self.groups = 3
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [18, f_c, 3, 3]


class TestWith1x1(TestConv2DOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [120, f_c, 1, 1]

    def init_group(self):
        # FIXME: support groups = 3 in this case.
        # NOTE(wangran16): an unknown error (ACL error code 507015) occurs
        # when groups = 3; it needs to be fixed.
        self.groups = 1


class TestWithDepthWise5x5(TestConv2DOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 4, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [8, f_c, 5, 5]

    def init_group(self):
        self.groups = 4


class TestWithDepthWise7x7(TestConv2DOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [2, 2]
        self.input_size = [2, 8, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [16, f_c, 7, 7]

    def init_group(self):
        self.groups = 8


class TestWithDilation(TestConv2DOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [2, 3, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [12, f_c, 3, 3]

    def init_dilation(self):
        self.dilations = [2, 2]

    # TODO(MLU): The depthwise operation does not support dilation yet;
    # it throws CNNL_STATUS_NOT_SUPPORTED.
    # def init_group(self):
    #     self.groups = 3
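

# With dilations = [2, 2], the 3x3 kernel covers an effective 5x5 window
# (dilation * (k - 1) + 1 = 5), so the 10x10 input above produces a 6x6 output.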


class TestWithInput1x1Filter1x1(TestConv2DOp):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 1]
        self.input_size = [100, 1, 1, 1]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [120, f_c, 1, 1]

    def init_group(self):
        self.groups = 1


class TestConv2DOp_v2(OpTest):
    def set_mlu(self):
        self.__class__.use_mlu = True
        self.place = paddle.device.MLUPlace(0)

    def setUp(self):
        self.set_mlu()
        self.op_type = "conv2d"
        self.init_dtype()
        self.init_kernel_type()
        self.init_group()
        self.init_dilation()
        self.init_data_format()
        self.init_test_case()
        self.init_paddings()
        self.init_test_case_2()

        conv2d_param = {
            'stride': self.stride,
            'pad': self.pad,
            'dilation': self.dilations
        }

        input = np.random.random(self.input_size).astype(self.dtype)
        filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype)

        output, _, _, _, _ = conv2d_forward_naive(
            input, filter, self.groups, conv2d_param, self.padding_algorithm,
            self.data_format)
        output = output.astype(self.dtype)

        self.inputs = {
            'Input': OpTest.np_dtype_to_fluid_dtype(input),
            'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
        }
        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'padding_algorithm': self.padding_algorithm,
            'groups': self.groups,
            'dilations': self.dilations,
            'data_format': self.data_format,
        }
        self.outputs = {'Output': output}

    def test_check_output(self):
        self.check_output_with_place(self.place, atol=1e-2)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad_with_place(
            self.place, ['Input', 'Filter'],
            'Output',
            max_relative_error=0.02,
            numeric_place=paddle.CPUPlace())

    def test_check_grad_no_filter(self):
        if self.dtype == np.float16:
            return
        self.check_grad_with_place(
            self.place, ['Input'],
            'Output',
            max_relative_error=0.02,
            no_grad_set=set(['Filter']),
            numeric_place=paddle.CPUPlace())

    def test_check_grad_no_input(self):
        if self.dtype == np.float16:
            return
        self.check_grad_with_place(
            self.place, ['Filter'],
            'Output',
            no_grad_set=set(['Input']),
            numeric_place=paddle.CPUPlace())

    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 4, 3]

    def init_dtype(self):
        # The fp16 cases generated by create_test_fp16_class override this
        # hook, so the dtype must be set here rather than hard-coded in setUp.
        self.dtype = np.float32

    def init_dilation(self):
        self.dilations = [1, 1]

    def init_group(self):
        self.groups = 1

    def init_kernel_type(self):
        pass

    def init_paddings(self):
        self.pad = [0, 0]
        self.padding_algorithm = "EXPLICIT"

    def init_data_format(self):
        self.data_format = "NCHW"

    def init_test_case_2(self):
        pass
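

# The _v2 cases exercise the padding_algorithm attribute:
#   "EXPLICIT" - self.pad is used as given; a 4-element pad is read as
#                [pad_top, pad_bottom, pad_left, pad_right] (asymmetric).
#   "SAME"     - padding is derived so output size == ceil(input / stride);
#                self.pad is ignored.
#   "VALID"    - no padding is applied; self.pad is ignored.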


class TestConv2DOp_AsyPadding(TestConv2DOp_v2):
    def init_paddings(self):
        self.pad = [0, 0, 1, 2]
        self.padding_algorithm = "EXPLICIT"


class TestWithPad_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]

    def init_paddings(self):
        self.pad = [2, 1, 3, 2]
        self.padding_algorithm = "EXPLICIT"


class TestWithStride_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [2, 2]
        self.input_size = [2, 3, 6, 6]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 3, 3]

    def init_paddings(self):
        self.pad = [2, 1, 3, 2]
        self.padding_algorithm = "EXPLICIT"


class TestWithGroup_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.pad = [0, 0]
        self.stride = [1, 2]
        self.input_size = [2, 3, 5, 5]  # NCHW
        self.groups = 3
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [24, f_c, 4, 3]


class TestWith1x1_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [2, 3, 5, 5]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [120, f_c, 1, 1]

    def init_group(self):
        self.groups = 1

    def init_paddings(self):
        self.pad = [2, 2, 4, 0]
        self.padding_algorithm = "EXPLICIT"


class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [3, 4, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [16, f_c, 3, 3]

    # TODO(MLU): The depthwise operation does not support dilation yet;
    # it throws CNNL_STATUS_NOT_SUPPORTED.
    # def init_dilation(self):
    #     self.dilations = [2, 2]

    def init_group(self):
        self.groups = 4

    def init_paddings(self):
        self.pad = [1, 3, 2, 1]
        self.padding_algorithm = "EXPLICIT"


class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [2, 4, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [8, f_c, 5, 5]

    def init_group(self):
        self.groups = 4

    def init_paddings(self):
        self.pad = [0, 1, 1, 0]
        self.padding_algorithm = "EXPLICIT"


class TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [2, 2]
        self.input_size = [2, 8, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [16, f_c, 7, 7]

    def init_group(self):
        self.groups = 8

    def init_paddings(self):
        self.pad = [1, 3, 4, 1]
        self.padding_algorithm = "EXPLICIT"


class TestWithDilation_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [2, 3, 10, 10]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [24, f_c, 3, 3]

    def init_dilation(self):
        self.dilations = [2, 2]

    # TODO(MLU): The depthwise operation does not support dilation yet;
    # it throws CNNL_STATUS_NOT_SUPPORTED.
    # def init_group(self):
    #     self.groups = 3

    def init_paddings(self):
        self.pad = [0, 1, 3, 0]
        self.padding_algorithm = "EXPLICIT"


class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DOp_v2):
    def init_test_case(self):
        self.stride = [1, 1]
        self.input_size = [100, 1, 1, 1]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [120, f_c, 1, 1]

    def init_group(self):
        self.groups = 1

    def init_paddings(self):
        self.pad = [0, 3, 4, 0]
        self.padding_algorithm = "EXPLICIT"
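

# Every test_check_grad* method returns early for float16, so the fp16 cases
# generated below only verify the forward pass (test_check_output, atol=1e-2).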


create_test_padding_SAME_class(TestConv2DOp_AsyPadding)
create_test_padding_SAME_class(TestWithPad_AsyPadding)
create_test_padding_SAME_class(TestWithStride_AsyPadding)
create_test_padding_SAME_class(TestWithGroup_AsyPadding)
create_test_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding)

create_test_padding_VALID_class(TestConv2DOp_AsyPadding)
create_test_padding_VALID_class(TestWithPad_AsyPadding)
create_test_padding_VALID_class(TestWithStride_AsyPadding)
create_test_padding_VALID_class(TestWithGroup_AsyPadding)
create_test_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding)

create_test_channel_last_class(TestConv2DOp_AsyPadding)
create_test_channel_last_class(TestWithPad_AsyPadding)
create_test_channel_last_class(TestWithGroup_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding)
create_test_channel_last_class(TestWithInput1x1Filter1x1_AsyPadding)

create_test_fp16_class(TestConv2DOp_AsyPadding)
create_test_fp16_class(TestWithPad_AsyPadding)
create_test_fp16_class(TestWithStride_AsyPadding)
create_test_fp16_class(TestWithGroup_AsyPadding)
create_test_fp16_class(TestWithInput1x1Filter1x1_AsyPadding)

if __name__ == "__main__":
    unittest.main()