Unverified commit 2b0c22ad, authored by Leo Guo, committed by GitHub

Modify the unit tests of the conv2d_transpose and gaussian_random ops. test=kunlun (#43961)

Parent 9e3433bd
@@ -22,9 +22,11 @@ import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
import paddle
import paddle.nn as nn
from paddle.fluid import Program, program_guard

paddle.enable_static()

@@ -117,166 +119,159 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
    return out
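The body of conv2dtranspose_forward_naive is collapsed by the diff viewer. For orientation, here is a minimal, hypothetical sketch of what such a naive reference computes: conv2d_transpose as a scatter-accumulate of each input pixel against the filter. The names are illustrative, and the sketch assumes NCHW input, a filter shaped [C_in, C_out, kH, kW], symmetric padding, dilation 1, groups 1, and no output_padding; the real helper in this test handles the full attribute set.

# Hypothetical, simplified sketch of the collapsed reference helper.
import numpy as np

def naive_conv2d_transpose(x, filt, stride, pad):
    n, c_in, in_h, in_w = x.shape
    _, c_out, kh, kw = filt.shape
    sh, sw = stride
    ph, pw = pad
    out_h = (in_h - 1) * sh + kh - 2 * ph
    out_w = (in_w - 1) * sw + kw - 2 * pw
    # Accumulate into an uncropped buffer, then slice the padding off.
    buf = np.zeros((n, c_out, (in_h - 1) * sh + kh, (in_w - 1) * sw + kw),
                   dtype=x.dtype)
    for i in range(in_h):
        for j in range(in_w):
            # Contract over input channels: (n, c_in) x (c_in, c_out, kh, kw).
            patch = np.einsum('nc,cokl->nokl', x[:, :, i, j], filt)
            buf[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw] += patch
    return buf[:, :, ph:ph + out_h, pw:pw + out_w]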
class XPUTestConv2DTransposeOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'conv2d_transpose'
        self.use_dynamic_create_class = False

    class TestConv2DTransposeOp(XPUOpTest):

        def setUp(self):
            # init as conv transpose
            self.need_check_grad = True
            self.is_test = False
            self.use_cudnn = False
            self.use_mkldnn = False
            self.output_size = None
            self.output_padding = []
            self.data_format = "NCHW"
            self.pad = [0, 0]
            self.padding_algorithm = "EXPLICIT"
            self.init_op_type()
            self.init_test_case()
            self.__class__.op_type = "conv2d_transpose"

            input_ = np.random.random(self.input_size).astype(self.dtype)
            filter_ = np.random.random(self.filter_size).astype(self.dtype)

            self.inputs = {'Input': input_, 'Filter': filter_}
            self.attrs = {
                'strides': self.stride,
                'paddings': self.pad,
                'padding_algorithm': self.padding_algorithm,
                'groups': self.groups,
                'dilations': self.dilations,
                'use_cudnn': self.use_cudnn,
                'is_test': self.is_test,
                'use_mkldnn': self.use_mkldnn,
                'data_format': self.data_format
            }
            if self.output_size is not None:
                self.attrs['output_size'] = self.output_size

            if len(self.output_padding) > 0:
                self.attrs['output_padding'] = self.output_padding

            output = conv2dtranspose_forward_naive(
                input_, filter_, self.attrs).astype(self.dtype)

            self.outputs = {'Output': output}

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad_no_input(self):
            if self.need_check_grad:
                self.check_grad_with_place(self.place, ['Filter'],
                                           'Output',
                                           no_grad_set=set(['Input']))

        def test_check_grad_no_filter(self):
            if self.need_check_grad:
                self.check_grad_with_place(self.place, ['Input'],
                                           'Output',
                                           no_grad_set=set(['Filter']))

        def test_check_grad(self):
            if self.need_check_grad:
                self.check_grad_with_place(self.place,
                                           set(['Input', 'Filter']), 'Output')

        def init_test_case(self):
            self.pad = [0, 0]
            self.stride = [1, 1]
            self.dilations = [1, 1]
            self.groups = 1
            self.input_size = [2, 3, 5, 5]  # NCHW
            f_c = self.input_size[1]
            self.filter_size = [f_c, 6, 3, 3]

        def init_op_type(self):
            self.dtype = self.in_type
            self.place = paddle.XPUPlace(0)
            self.op_type = "conv2d_transpose"

    class TestWithSymmetricPad(TestConv2DTransposeOp):

        def init_test_case(self):
            self.pad = [1, 1]
            self.stride = [1, 1]
            self.dilations = [1, 1]
            self.groups = 1
            self.input_size = [2, 3, 5, 5]  # NCHW
            f_c = self.input_size[1]
            self.filter_size = [f_c, 6, 3, 3]

    class TestWithAsymmetricPad(TestConv2DTransposeOp):

        def init_test_case(self):
            self.pad = [1, 0, 1, 2]
            self.stride = [1, 1]
            self.dilations = [1, 1]
            self.groups = 1
            self.input_size = [2, 3, 5, 5]  # NCHW
            f_c = self.input_size[1]
            self.filter_size = [f_c, 6, 3, 3]

    class TestWithSAMEPad(TestConv2DTransposeOp):

        def init_test_case(self):
            self.stride = [2, 1]
            self.dilations = [1, 2]
            self.groups = 1
            self.input_size = [2, 3, 6, 5]  # NCHW
            f_c = self.input_size[1]
            self.filter_size = [f_c, 6, 4, 3]
            self.padding_algorithm = 'SAME'

    class TestWithVALIDPad(TestConv2DTransposeOp):

        def init_test_case(self):
            self.stride = [1, 1]
            self.dilations = [1, 1]
            self.groups = 1
            self.input_size = [2, 3, 5, 5]  # NCHW
            f_c = self.input_size[1]
            self.filter_size = [f_c, 6, 3, 3]
            self.padding_algorithm = 'VALID'

    class TestWithGroups(TestConv2DTransposeOp):

        def init_test_case(self):
            self.pad = [1, 1]
            self.stride = [1, 1]
            self.dilations = [1, 1]
            self.groups = 2
            self.input_size = [2, 4, 5, 5]  # NCHW
            f_c = self.input_size[1]
            self.filter_size = [f_c, 3, 3, 3]

    class TestWithStride(TestConv2DTransposeOp):

        def init_test_case(self):
            self.pad = [1, 1]
            self.stride = [2, 2]
            self.dilations = [1, 1]
            self.groups = 1
            self.input_size = [2, 3, 5, 5]  # NCHW
            f_c = self.input_size[1]
            self.filter_size = [f_c, 6, 3, 3]


support_types = get_xpu_op_support_types('conv2d_transpose')
for stype in support_types:
    create_test_class(globals(), XPUTestConv2DTransposeOp, stype)

if __name__ == '__main__':
    unittest.main()
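The registration loop above is what drives dtype coverage: get_xpu_op_support_types reports the dtypes the XPU kernel registers for conv2d_transpose, and create_test_class stamps out one concrete unittest class per dtype from the wrapper's inner classes, injecting in_type (which init_op_type copies into self.dtype). A rough, hypothetical sketch of that mechanism, with illustrative names; the real helper lives in xpu/get_test_cover_info.py and does more bookkeeping:

# Hypothetical sketch of per-dtype test generation, not the real helper.
import inspect

def sketch_create_test_class(namespace, wrapper_cls, dtype):
    # Clone every inner Test* class once per supported dtype and register
    # it under a dtype-suffixed name, e.g. TestWithStride_float32.
    for name, member in inspect.getmembers(wrapper_cls, inspect.isclass):
        if name.startswith('Test'):
            test_name = "{}_{}".format(name, dtype)
            namespace[test_name] = type(test_name, (member,),
                                        {'in_type': dtype})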
@@ -21,25 +21,293 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
class XPUTestGaussianRandomOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'gaussian_random'
        self.use_dynamic_create_class = False

    class TestGaussianRandomOp(XPUOpTest):

        def init(self):
            self.dtype = self.in_type
            self.place = paddle.XPUPlace(0)
            self.op_type = 'gaussian_random'

        def setUp(self):
            self.init()
            self.python_api = paddle.normal
            self.set_attrs()
            self.inputs = {}
            self.use_mkldnn = False
            self.attrs = {
                "shape": [123, 92],
                "mean": self.mean,
                "std": self.std,
                "seed": 10,
                "use_mkldnn": self.use_mkldnn
            }
            paddle.seed(10)

            self.outputs = {'Out': np.zeros((123, 92), dtype=self.dtype)}

        def set_attrs(self):
            self.mean = 1.0
            self.std = 2.

        def test_check_output(self):
            self.check_output_with_place_customized(self.verify_output,
                                                    self.place)

        def verify_output(self, outs):
            self.assertEqual(outs[0].shape, (123, 92))
            hist, _ = np.histogram(outs[0], range=(-3, 5))
            hist = hist.astype("float32")
            hist /= float(outs[0].size)
            data = np.random.normal(size=(123, 92), loc=1, scale=2)
            hist2, _ = np.histogram(data, range=(-3, 5))
            hist2 = hist2.astype("float32")
            hist2 /= float(outs[0].size)
            self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.01),
                            "hist: " + str(hist) + " hist2: " + str(hist2))

    class TestMeanStdAreInt(TestGaussianRandomOp):

        def set_attrs(self):
            self.mean = 1
            self.std = 2

    # Situation 2: Attr(shape) is a list (with tensor)
    class TestGaussianRandomOp_ShapeTensorList(TestGaussianRandomOp):

        def setUp(self):
            '''Test gaussian_random op with specified value.
            '''
            self.init()
            self.init_data()
            shape_tensor_list = []
            for index, ele in enumerate(self.shape):
                shape_tensor_list.append(("x" + str(index), np.ones(
                    (1)).astype('int32') * ele))

            self.attrs = {
                'shape': self.infer_shape,
                'mean': self.mean,
                'std': self.std,
                'seed': self.seed,
                'use_mkldnn': self.use_mkldnn
            }
            self.inputs = {"ShapeTensorList": shape_tensor_list}
            self.outputs = {'Out': np.zeros(self.shape, dtype=self.dtype)}

        def init_data(self):
            self.shape = [123, 92]
            self.infer_shape = [-1, 92]
            self.use_mkldnn = False
            self.mean = 1.0
            self.std = 2.0
            self.seed = 10

        def test_check_output(self):
            self.check_output_with_place_customized(self.verify_output,
                                                    self.place)

    class TestGaussianRandomOp2_ShapeTensorList(
            TestGaussianRandomOp_ShapeTensorList):

        def init_data(self):
            self.shape = [123, 92]
            self.infer_shape = [-1, -1]
            self.use_mkldnn = False
            self.mean = 1.0
            self.std = 2.0
            self.seed = 10

    class TestGaussianRandomOp3_ShapeTensorList(
            TestGaussianRandomOp_ShapeTensorList):

        def init_data(self):
            self.shape = [123, 92]
            self.infer_shape = [123, -1]
            self.use_mkldnn = True
            self.mean = 1.0
            self.std = 2.0
            self.seed = 10

    class TestGaussianRandomOp4_ShapeTensorList(
            TestGaussianRandomOp_ShapeTensorList):

        def init_data(self):
            self.shape = [123, 92]
            self.infer_shape = [123, -1]
            self.use_mkldnn = False
            self.mean = 1.0
            self.std = 2.0
            self.seed = 10

    # Situation 3: shape is a tensor
    class TestGaussianRandomOp1_ShapeTensor(TestGaussianRandomOp):

        def setUp(self):
            '''Test gaussian_random op with specified value.
            '''
            self.init()
            self.init_data()
            self.use_mkldnn = False

            self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")}
            self.attrs = {
                'mean': self.mean,
                'std': self.std,
                'seed': self.seed,
                'use_mkldnn': self.use_mkldnn
            }
            self.outputs = {'Out': np.zeros((123, 92), dtype=self.dtype)}

        def init_data(self):
            self.shape = [123, 92]
            self.use_mkldnn = False
            self.mean = 1.0
            self.std = 2.0
            self.seed = 10


# Test python API
class TestGaussianRandomAPI(unittest.TestCase):

    def test_api(self):
        positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2000)
        positive_2_int64 = fluid.layers.fill_constant([1], "int64", 500)
        shape_tensor_int32 = fluid.data(name="shape_tensor_int32",
                                        shape=[2],
                                        dtype="int32")
        shape_tensor_int64 = fluid.data(name="shape_tensor_int64",
                                        shape=[2],
                                        dtype="int64")

        out_1 = fluid.layers.gaussian_random(shape=[2000, 500],
                                             dtype="float32",
                                             mean=0.0,
                                             std=1.0,
                                             seed=10)

        out_2 = fluid.layers.gaussian_random(shape=[2000, positive_2_int32],
                                             dtype="float32",
                                             mean=0.,
                                             std=1.0,
                                             seed=10)

        out_3 = fluid.layers.gaussian_random(shape=[2000, positive_2_int64],
                                             dtype="float32",
                                             mean=0.,
                                             std=1.0,
                                             seed=10)

        out_4 = fluid.layers.gaussian_random(shape=shape_tensor_int32,
                                             dtype="float32",
                                             mean=0.,
                                             std=1.0,
                                             seed=10)

        out_5 = fluid.layers.gaussian_random(shape=shape_tensor_int64,
                                             dtype="float32",
                                             mean=0.,
                                             std=1.0,
                                             seed=10)

        out_6 = fluid.layers.gaussian_random(shape=shape_tensor_int64,
                                             dtype=np.float32,
                                             mean=0.,
                                             std=1.0,
                                             seed=10)

        exe = fluid.Executor(place=fluid.XPUPlace(0))
        res_1, res_2, res_3, res_4, res_5, res_6 = exe.run(
            fluid.default_main_program(),
            feed={
                "shape_tensor_int32": np.array([2000, 500]).astype("int32"),
                "shape_tensor_int64": np.array([2000, 500]).astype("int64"),
            },
            fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6])

        self.assertAlmostEqual(np.mean(res_1), 0.0, delta=0.1)
        self.assertAlmostEqual(np.std(res_1), 1., delta=0.1)
        self.assertAlmostEqual(np.mean(res_2), 0.0, delta=0.1)
        self.assertAlmostEqual(np.std(res_2), 1., delta=0.1)
        self.assertAlmostEqual(np.mean(res_3), 0.0, delta=0.1)
        self.assertAlmostEqual(np.std(res_3), 1., delta=0.1)
        self.assertAlmostEqual(np.mean(res_4), 0.0, delta=0.1)
        self.assertAlmostEqual(np.std(res_4), 1., delta=0.1)
        self.assertAlmostEqual(np.mean(res_5), 0.0, delta=0.1)
        self.assertAlmostEqual(np.std(res_5), 1., delta=0.1)
        self.assertAlmostEqual(np.mean(res_6), 0.0, delta=0.1)
        self.assertAlmostEqual(np.std(res_6), 1., delta=0.1)

    def test_default_dtype(self):
        paddle.disable_static()

        def test_default_fp16():
            paddle.framework.set_default_dtype('float16')
            paddle.tensor.random.gaussian([2, 3])

        self.assertRaises(TypeError, test_default_fp16)

        def test_default_fp32():
            paddle.framework.set_default_dtype('float32')
            out = paddle.tensor.random.gaussian([2, 3])
            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32)

        def test_default_fp64():
            paddle.framework.set_default_dtype('float64')
            out = paddle.tensor.random.gaussian([2, 3])
            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)

        test_default_fp64()
        test_default_fp32()

        paddle.enable_static()


class TestStandardNormalDtype(unittest.TestCase):

    def test_default_dtype(self):
        paddle.disable_static()

        def test_default_fp16():
            paddle.framework.set_default_dtype('float16')
            paddle.tensor.random.standard_normal([2, 3])

        self.assertRaises(TypeError, test_default_fp16)

        def test_default_fp32():
            paddle.framework.set_default_dtype('float32')
            out = paddle.tensor.random.standard_normal([2, 3])
            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32)

        def test_default_fp64():
            paddle.framework.set_default_dtype('float64')
            out = paddle.tensor.random.standard_normal([2, 3])
            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)

        test_default_fp64()
        test_default_fp32()

        paddle.enable_static()
support_types = get_xpu_op_support_types('gaussian_random')
for stype in support_types:
    create_test_class(globals(), XPUTestGaussianRandomOp, stype)

if __name__ == "__main__":
    unittest.main()
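A note on verify_output above: the check is statistical, not exact. The histogram range (-3, 5) covers mean ± 2σ for the configured mean=1, std=2, and the two normalized histograms must agree within an absolute tolerance of 0.01 per bucket. The same check, pulled out as a self-contained function (the function name and defaults here are illustrative):

# Standalone illustration of the histogram comparison used by verify_output.
import numpy as np

def histograms_match(samples, mean=1.0, std=2.0, atol=0.01):
    # Bucket both the op output and a NumPy reference draw over
    # mean +/- 2*std, normalize to frequencies, and compare bucket-wise.
    lo, hi = mean - 2 * std, mean + 2 * std
    hist, _ = np.histogram(samples, range=(lo, hi))
    hist = hist.astype("float32") / samples.size
    ref = np.random.normal(loc=mean, scale=std, size=samples.shape)
    hist2, _ = np.histogram(ref, range=(lo, hi))
    hist2 = hist2.astype("float32") / ref.size
    return np.allclose(hist, hist2, rtol=0, atol=atol)

# e.g. histograms_match(np.random.normal(1.0, 2.0, size=(123, 92))) is
# expected to be True with high probability.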