Unverified commit 2b0c22ad, authored by Leo Guo, committed by GitHub

Modify the unittests of the conv2d_transpose, gaussian_random op. test=kunlun (#43961)

Parent commit: 9e3433bd
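Both test files below move onto Paddle's XPU test-cover utilities, which enumerate the dtypes an op supports on XPU and generate one concrete test class per dtype. A minimal sketch of the pattern, assuming the helpers behave as they are used in this diff ('my_op' and the class names are illustrative placeholders):

    import paddle
    from op_test_xpu import XPUOpTest
    from xpu.get_test_cover_info import (create_test_class,
                                         get_xpu_op_support_types,
                                         XPUOpTestWrapper)

    class XPUTestMyOp(XPUOpTestWrapper):
        def __init__(self):
            self.op_name = 'my_op'                 # hypothetical op name
            self.use_dynamic_create_class = False

        class TestMyOp(XPUOpTest):
            def setUp(self):
                self.dtype = self.in_type          # injected per generated class
                self.place = paddle.XPUPlace(0)

    for stype in get_xpu_op_support_types('my_op'):
        create_test_class(globals(), XPUTestMyOp, stype)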
@@ -22,9 +22,11 @@ import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
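# create_test_class, get_xpu_op_support_types and XPUOpTestWrapper come from
# Paddle's XPU test utilities: they list the dtypes an op supports on XPU and
# stamp out one unittest class per dtype (see the loop at the end of this file).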
import paddle
import paddle.nn as nn
from paddle.fluid import Program, program_guard
paddle.enable_static()
def conv2dtranspose_forward_naive(input_, filter_, attrs):
@@ -117,166 +119,159 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
return out
class TestConv2DTransposeOp(XPUOpTest):
def setUp(self):
# init as conv transpose
self.dtype = np.float32
self.need_check_grad = True
self.is_test = False
self.use_cudnn = False
self.use_mkldnn = False
self.output_size = None
self.output_padding = []
self.data_format = "NCHW"
self.pad = [0, 0]
self.padding_algorithm = "EXPLICIT"
self.init_op_type()
self.init_test_case()
self.__class__.op_type = "conv2d_transpose"
input_ = np.random.random(self.input_size).astype(self.dtype)
filter_ = np.random.random(self.filter_size).astype(self.dtype)
self.inputs = {'Input': input_, 'Filter': filter_}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'padding_algorithm': self.padding_algorithm,
'groups': self.groups,
'dilations': self.dilations,
'use_cudnn': self.use_cudnn,
'is_test': self.is_test,
'use_mkldnn': self.use_mkldnn,
'data_format': self.data_format
}
if self.output_size is not None:
self.attrs['output_size'] = self.output_size
if len(self.output_padding) > 0:
self.attrs['output_padding'] = self.output_padding
output = conv2dtranspose_forward_naive(input_, filter_,
self.attrs).astype(self.dtype)
self.outputs = {'Output': output}
def test_check_output(self):
if core.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_no_input(self):
if self.need_check_grad:
if core.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['Filter'],
class XPUTestConv2DTransposeOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'conv2d_transpose'
self.use_dynamic_create_class = False
class TestConv2DTransposeOp(XPUOpTest):
def setUp(self):
# init as conv transpose
self.need_check_grad = True
self.is_test = False
self.use_cudnn = False
self.use_mkldnn = False
self.output_size = None
self.output_padding = []
self.data_format = "NCHW"
self.pad = [0, 0]
self.padding_algorithm = "EXPLICIT"
self.init_op_type()
self.init_test_case()
self.__class__.op_type = "conv2d_transpose"
input_ = np.random.random(self.input_size).astype(self.dtype)
filter_ = np.random.random(self.filter_size).astype(self.dtype)
self.inputs = {'Input': input_, 'Filter': filter_}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'padding_algorithm': self.padding_algorithm,
'groups': self.groups,
'dilations': self.dilations,
'use_cudnn': self.use_cudnn,
'is_test': self.is_test,
'use_mkldnn': self.use_mkldnn,
'data_format': self.data_format
}
if self.output_size is not None:
self.attrs['output_size'] = self.output_size
if len(self.output_padding) > 0:
self.attrs['output_padding'] = self.output_padding
output = conv2dtranspose_forward_naive(
input_, filter_, self.attrs).astype(self.dtype)
self.outputs = {'Output': output}
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad_no_input(self):
if self.need_check_grad:
self.check_grad_with_place(self.place, ['Filter'],
'Output',
no_grad_set=set(['Input']))
def test_check_grad_no_filter(self):
if self.need_check_grad:
if core.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['Input'],
def test_check_grad_no_filter(self):
if self.need_check_grad:
self.check_grad_with_place(self.place, ['Input'],
'Output',
no_grad_set=set(['Filter']))
def test_check_grad(self):
if self.need_check_grad:
if core.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, set(['Input', 'Filter']),
def test_check_grad(self):
if self.need_check_grad:
self.check_grad_with_place(self.place, set(['Input', 'Filter']),
'Output')
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.dilations = [1, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
def init_op_type(self):
self.op_type = "conv2d_transpose"
class TestWithSymmetricPad(TestConv2DTransposeOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.dilations = [1, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
class TestWithAsymmetricPad(TestConv2DTransposeOp):
def init_test_case(self):
self.pad = [1, 0, 1, 2]
self.stride = [1, 1]
self.dilations = [1, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
class TestWithSAMEPad(TestConv2DTransposeOp):
def init_test_case(self):
self.stride = [2, 1]
self.dilations = [1, 2]
self.groups = 1
self.input_size = [2, 3, 6, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 4, 3]
self.padding_algorithm = 'SAME'
class TestWithVALIDPad(TestConv2DTransposeOp):
def init_test_case(self):
self.stride = [1, 1]
self.dilations = [1, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
self.padding_algorithm = 'VALID'
class TestWithGroups(TestConv2DTransposeOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.dilations = [1, 1]
self.groups = 2
self.input_size = [2, 4, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 3, 3, 3]
class TestWithStride(TestConv2DTransposeOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [2, 2]
self.dilations = [1, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.dilations = [1, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
def init_op_type(self):
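# self.in_type is filled in per generated class by create_test_class below.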
self.dtype = self.in_type
self.place = paddle.XPUPlace(0)
self.op_type = "conv2d_transpose"
class TestWithSymmetricPad(TestConv2DTransposeOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.dilations = [1, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
class TestWithAsymmetricPad(TestConv2DTransposeOp):
def init_test_case(self):
self.pad = [1, 0, 1, 2]
self.stride = [1, 1]
self.dilations = [1, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
class TestWithSAMEPad(TestConv2DTransposeOp):
def init_test_case(self):
self.stride = [2, 1]
self.dilations = [1, 2]
self.groups = 1
self.input_size = [2, 3, 6, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 4, 3]
self.padding_algorithm = 'SAME'
class TestWithVALIDPad(TestConv2DTransposeOp):
def init_test_case(self):
self.stride = [1, 1]
self.dilations = [1, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
self.padding_algorithm = 'VALID'
class TestWithGroups(TestConv2DTransposeOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.dilations = [1, 1]
self.groups = 2
self.input_size = [2, 4, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 3, 3, 3]
class TestWithStride(TestConv2DTransposeOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [2, 2]
self.dilations = [1, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
support_types = get_xpu_op_support_types('conv2d_transpose')
for stype in support_types:
create_test_class(globals(), XPUTestConv2DTransposeOp, stype)
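# Each supported dtype yields a concrete subclass of every Test* class above;
# the exact generated class names are determined by create_test_class.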
if __name__ == '__main__':
unittest.main()
@@ -21,25 +21,293 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from op_test import OpTest
from test_gaussian_random_op import TestGaussianRandomOp
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
import paddle
paddle.enable_static()
class TestXPUGaussianRandomOp(TestGaussianRandomOp):
class XPUTestGaussianRandomOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'gaussian_random'
self.use_dynamic_create_class = False
class TestGaussianRandomOp(XPUOpTest):
def init(self):
self.dtype = self.in_type
self.place = paddle.XPUPlace(0)
self.op_type = 'gaussian_random'
def setUp(self):
self.init()
self.python_api = paddle.normal
self.set_attrs()
self.inputs = {}
self.use_mkldnn = False
self.attrs = {
"shape": [123, 92],
"mean": self.mean,
"std": self.std,
"seed": 10,
"use_mkldnn": self.use_mkldnn
}
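# Fix Paddle's global seed so repeated runs draw the same samples for the
# statistical check in verify_output.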
paddle.seed(10)
self.outputs = {'Out': np.zeros((123, 92), dtype=self.dtype)}
def set_attrs(self):
self.mean = 1.0
self.std = 2.
def test_check_output(self):
self.check_output_with_place_customized(self.verify_output,
self.place)
def verify_output(self, outs):
self.assertEqual(outs[0].shape, (123, 92))
hist, _ = np.histogram(outs[0], range=(-3, 5))
hist = hist.astype("float32")
hist /= float(outs[0].size)
data = np.random.normal(size=(123, 92), loc=1, scale=2)
hist2, _ = np.histogram(data, range=(-3, 5))
hist2 = hist2.astype("float32")
hist2 /= float(outs[0].size)
self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.01),
"hist: " + str(hist) + " hist2: " + str(hist2))
class TestMeanStdAreInt(TestGaussianRandomOp):
def set_attrs(self):
self.mean = 1
self.std = 2
# Situation 2: Attr(shape) is a list(with tensor)
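# Dims marked -1 in the 'shape' attr are resolved at runtime from the
# ShapeTensorList input, one 1-element int32 tensor per dimension.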
class TestGaussianRandomOp_ShapeTensorList(TestGaussianRandomOp):
def setUp(self):
'''Test gaussian_random op with specified value
'''
self.init()
self.init_data()
shape_tensor_list = []
for index, ele in enumerate(self.shape):
shape_tensor_list.append(("x" + str(index), np.ones(
(1)).astype('int32') * ele))
self.attrs = {
'shape': self.infer_shape,
'mean': self.mean,
'std': self.std,
'seed': self.seed,
'use_mkldnn': self.use_mkldnn
}
self.inputs = {"ShapeTensorList": shape_tensor_list}
self.outputs = {'Out': np.zeros(self.shape, dtype=self.dtype)}
def init_data(self):
self.shape = [123, 92]
self.infer_shape = [-1, 92]
self.use_mkldnn = False
self.mean = 1.0
self.std = 2.0
self.seed = 10
def test_check_output(self):
self.check_output_with_place_customized(self.verify_output,
self.place)
class TestGaussianRandomOp2_ShapeTensorList(
TestGaussianRandomOp_ShapeTensorList):
def init_data(self):
self.shape = [123, 92]
self.infer_shape = [-1, -1]
self.use_mkldnn = False
self.mean = 1.0
self.std = 2.0
self.seed = 10
class TestGaussianRandomOp3_ShapeTensorList(
TestGaussianRandomOp_ShapeTensorList):
def init_data(self):
self.shape = [123, 92]
self.infer_shape = [123, -1]
self.use_mkldnn = True
self.mean = 1.0
self.std = 2.0
self.seed = 10
class TestGaussianRandomOp4_ShapeTensorList(
TestGaussianRandomOp_ShapeTensorList):
def init_data(self):
self.shape = [123, 92]
self.infer_shape = [123, -1]
self.use_mkldnn = False
self.mean = 1.0
self.std = 2.0
self.seed = 10
# Situation 3: shape is a tensor
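# Here the whole target shape is fed as a single int32 tensor (ShapeTensor)
# rather than as a list of per-dimension tensors.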
class TestGaussianRandomOp1_ShapeTensor(TestGaussianRandomOp):
def setUp(self):
'''Test gaussian_random op with specified value
'''
self.init()
self.init_data()
self.use_mkldnn = False
self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")}
self.attrs = {
'mean': self.mean,
'std': self.std,
'seed': self.seed,
'use_mkldnn': self.use_mkldnn
}
self.outputs = {'Out': np.zeros((123, 92), dtype=self.dtype)}
def init_data(self):
self.shape = [123, 92]
self.use_mkldnn = False
self.mean = 1.0
self.std = 2.0
self.seed = 10
# Test python API
class TestGaussianRandomAPI(unittest.TestCase):
def test_api(self):
positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2000)
positive_2_int64 = fluid.layers.fill_constant([1], "int64", 500)
shape_tensor_int32 = fluid.data(name="shape_tensor_int32",
shape=[2],
dtype="int32")
shape_tensor_int64 = fluid.data(name="shape_tensor_int64",
shape=[2],
dtype="int64")
out_1 = fluid.layers.gaussian_random(shape=[2000, 500],
dtype="float32",
mean=0.0,
std=1.0,
seed=10)
out_2 = fluid.layers.gaussian_random(shape=[2000, positive_2_int32],
dtype="float32",
mean=0.,
std=1.0,
seed=10)
out_3 = fluid.layers.gaussian_random(shape=[2000, positive_2_int64],
dtype="float32",
mean=0.,
std=1.0,
seed=10)
out_4 = fluid.layers.gaussian_random(shape=shape_tensor_int32,
dtype="float32",
mean=0.,
std=1.0,
seed=10)
out_5 = fluid.layers.gaussian_random(shape=shape_tensor_int64,
dtype="float32",
mean=0.,
std=1.0,
seed=10)
out_6 = fluid.layers.gaussian_random(shape=shape_tensor_int64,
dtype=np.float32,
mean=0.,
std=1.0,
seed=10)
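# out_1..out_6 cover the supported ways of passing 'shape': a plain list, a
# list mixing ints and 1-element tensors (int32/int64), a shape tensor
# (int32/int64), and dtype given as np.float32 instead of a string.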
exe = fluid.Executor(place=fluid.XPUPlace(0))
res_1, res_2, res_3, res_4, res_5, res_6 = exe.run(
fluid.default_main_program(),
feed={
"shape_tensor_int32": np.array([2000, 500]).astype("int32"),
"shape_tensor_int64": np.array([2000, 500]).astype("int64"),
},
fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6])
self.assertAlmostEqual(np.mean(res_1), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_1), 1., delta=0.1)
self.assertAlmostEqual(np.mean(res_2), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_2), 1., delta=0.1)
self.assertAlmostEqual(np.mean(res_3), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_3), 1., delta=0.1)
self.assertAlmostEqual(np.mean(res_4), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_4), 1., delta=0.1)
self.assertAlmostEqual(np.mean(res_5), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_5), 1., delta=0.1)
self.assertAlmostEqual(np.mean(res_6), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_6), 1., delta=0.1)
def test_default_dtype(self):
paddle.disable_static()
def test_default_fp16():
paddle.framework.set_default_dtype('float16')
paddle.tensor.random.gaussian([2, 3])
self.assertRaises(TypeError, test_default_fp16)
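# A float16 default dtype is expected to be rejected with TypeError;
# float32/float64 defaults are accepted below.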
def test_default_fp32():
paddle.framework.set_default_dtype('float32')
out = paddle.tensor.random.gaussian([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32)
def test_default_fp64():
paddle.framework.set_default_dtype('float64')
out = paddle.tensor.random.gaussian([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
test_default_fp64()
test_default_fp32()
paddle.enable_static()
class TestStandardNormalDtype(unittest.TestCase):
def test_default_dtype(self):
paddle.disable_static()
def test_default_fp16():
paddle.framework.set_default_dtype('float16')
paddle.tensor.random.standard_normal([2, 3])
self.assertRaises(TypeError, test_default_fp16)
def test_default_fp32():
paddle.framework.set_default_dtype('float32')
out = paddle.tensor.random.standard_normal([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32)
def test_default_fp64():
paddle.framework.set_default_dtype('float64')
out = paddle.tensor.random.standard_normal([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
test_default_fp64()
test_default_fp32()
paddle.enable_static()
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
outs = self.calc_output(place)
outs = [np.array(out) for out in outs]
outs.sort(key=len)
self.verify_output(outs)
support_types = get_xpu_op_support_types('gaussian_random')
for stype in support_types:
create_test_class(globals(), XPUTestGaussianRandomOp, stype)
if __name__ == "__main__":
unittest.main()