Commit a957c928 authored by: W wjj19950828

deal with comments

上级 47066a34
......@@ -59,11 +59,15 @@ class OPConvertAutoScanTest(unittest.TestCase):
super(OPConvertAutoScanTest, self).__init__(*args, **kwargs)
np.random.seed(1024)
paddle.enable_static()
self.num_ran_models = 0
self.num_ran_tests = 0
self.num_ignore_tests = 0
def add_ignore_pass_case(self, configs):
    """Hook for subclasses to mark generated test configs as ignored.

    Args:
        configs: the (config, attrs) pair produced by the subclass's
            sample_convert_config.

    Returns:
        bool: True if the case should be skipped (it is then counted in
        num_ignore_tests and not executed); the base implementation
        ignores nothing.
    """
    # Return an explicit False rather than a bare `return` (None):
    # overrides return True/False, and the caller only tests truthiness,
    # so this keeps the return type consistent without changing behavior.
    return False
def run_and_statis(self,
max_examples=100,
opset_version=[7, 9, 15],
min_opset_version=7,
reproduce=None,
min_success_num=25,
max_duration=-1):
......@@ -83,6 +87,8 @@ class OPConvertAutoScanTest(unittest.TestCase):
report_multiple_bugs=False, )
settings.load_profile("ci")
# self.add_ignore_pass_case(configs)
# Adapter handed to the hypothesis machinery: forwards the strategy
# `draw` handle to this test case's own config sampler.
# NOTE: `self` is a free variable captured from the enclosing
# run_and_statis scope, not a parameter.
def sample_convert_generator(draw):
    return self.sample_convert_config(draw)
......@@ -101,8 +107,11 @@ class OPConvertAutoScanTest(unittest.TestCase):
logging.info(
"===================Statistical Information===================")
logging.info("Number of Generated Programs: {}".format(
self.num_ran_models))
successful_ran_programs = int(self.num_ran_models)
self.num_ran_tests))
logging.info("Number of Ignore Programs: {}".format(
self.num_ignore_tests))
successful_ran_programs = int(self.num_ran_tests -
self.num_ignore_tests)
if successful_ran_programs < min_success_num:
logging.warning("satisfied_programs = ran_programs")
logging.error(
......@@ -128,41 +137,34 @@ class OPConvertAutoScanTest(unittest.TestCase):
), "config must include test_data_shapes in dict keys"
assert "test_data_types" in config.keys(
), "config must include test_data_types in dict keys"
assert "opset_version" in config.keys(
), "config must include opset_version in dict keys"
assert "min_opset_version" in config.keys(
), "config must include min_opset_version in dict keys"
assert "inputs_name" in config.keys(
), "config must include inputs_name in dict keys"
assert "outputs_name" in config.keys(
), "config must include outputs_name in dict keys"
assert "inputs_shape" in config.keys(
), "config must include inputs_shape in dict keys"
assert "outputs_shape" in config.keys(
), "config must include outputs_shape in dict keys"
assert "outputs_dtype" in config.keys(
), "config must include outputs_dtype in dict keys"
op_names = config["op_names"]
test_data_shapes = config["test_data_shapes"]
test_data_types = config["test_data_types"]
opset_version = config["opset_version"]
min_opset_version = config["min_opset_version"]
inputs_name = config["inputs_name"]
outputs_name = config["outputs_name"]
inputs_shape = config["inputs_shape"]
outputs_shape = config["outputs_shape"]
outputs_dtype = config["outputs_dtype"]
use_gpu = True
if "use_gpu" in config.keys():
use_gpu = config["use_gpu"]
# max_opset_version is a fixed value
max_opset_version = 15
self.num_ran_models += 1
self.num_ran_tests += 1
# add ignore testcases
if self.add_ignore_pass_case(configs):
self.num_ignore_tests += 1
return
if not isinstance(op_names, (tuple, list)):
op_names = [op_names]
if not isinstance(opset_version[0], (tuple, list)):
opset_version = [opset_version]
if len(opset_version) == 1 and len(op_names) != len(opset_version):
opset_version = opset_version * len(op_names)
input_type_list = None
if len(test_data_types) > 1:
......@@ -181,12 +183,13 @@ class OPConvertAutoScanTest(unittest.TestCase):
delta = config["delta"]
if "rtol" in config.keys():
rtol = config["rtol"]
if "max_opset_version" in config.keys():
max_opset_version = config["max_opset_version"]
for i in range(len(op_names)):
obj = ONNXConverter(op_names[i], opset_version[i], op_names[i],
inputs_name, outputs_name, inputs_shape,
outputs_shape, outputs_dtype, delta, rtol,
use_gpu, attrs)
obj = ONNXConverter(op_names[i], min_opset_version,
max_opset_version, op_names[i], inputs_name,
outputs_name, inputs_shape, delta, rtol, attrs)
for input_type in input_type_list:
input_data = list()
for j, shape in enumerate(test_data_shapes):
......
......@@ -33,11 +33,11 @@ DTYPE_ONNX_STR_MAP = {
def compare(result, expect, delta=1e-10, rtol=1e-10):
"""
比较函数
:param result: 输入值
:param expect: 输出值
:param delta: 误差值
:return:
param meaning:
result: onnx result
expect: paddle result
delta: absolute error
rtol: relative error
"""
if type(result) == np.ndarray:
if type(expect) == list:
......@@ -66,8 +66,9 @@ def compare(result, expect, delta=1e-10, rtol=1e-10):
compare(result[i], expect[i], delta, rtol)
else:
compare(result[i].numpy(), expect[i], delta, rtol)
elif len(result) == 1:
compare(result[0], expect[0], delta, rtol)
# deal with scalar tensor
elif len(expect) == 1:
compare(result, expect[0], delta, rtol)
def randtool(dtype, low, high, shape):
......@@ -91,16 +92,14 @@ class ONNXConverter(object):
def __init__(self,
file_name,
ver_list,
min_opset_version,
max_opset_version,
op_type=[],
inputs_name=[],
outputs_name=[],
inputs_shape=[],
outputs_shape=[],
outputs_dtype=[],
delta=1e-5,
rtol=1e-5,
use_gpu=True,
attrs=[]):
self.op_type = op_type
assert isinstance(self.op_type,
......@@ -108,12 +107,10 @@ class ONNXConverter(object):
self.seed = 33
np.random.seed(self.seed)
paddle.seed(self.seed)
if use_gpu and paddle.device.is_compiled_with_cuda() is True:
self.places = ['gpu']
else:
self.places = ['cpu']
self.places = ['cpu']
self.name = file_name
self._version = ver_list
self.min_opset_version = min_opset_version
self.max_opset_version = max_opset_version
self.pwd = os.getcwd()
self.delta = delta
self.rtol = rtol
......@@ -124,8 +121,6 @@ class ONNXConverter(object):
self.inputs_name = inputs_name
self.outputs_name = outputs_name
self.inputs_shape = inputs_shape
self.outputs_shape = outputs_shape
self.outputs_dtype = outputs_dtype
self.attrs = attrs
def set_input_data(self, group_name, *args):
......@@ -189,7 +184,6 @@ class ONNXConverter(object):
result = tuple(out.numpy() for out in result)
else:
result = (result.numpy(), )
print("paddle result:", result[0].shape)
return result
def _mk_onnx_res(self, ver):
......@@ -200,7 +194,6 @@ class ONNXConverter(object):
os.path.join(self.pwd, self.name, self.name + '_' + str(ver) +
'.onnx'))
ort_outs = sess.run(output_names=None, input_feed=self.input_feed)
print("onnx result:", ort_outs[0].shape)
return ort_outs
def set_onnx_inputs(self):
......@@ -216,10 +209,7 @@ class ONNXConverter(object):
def set_onnx_outputs(self):
    """Build the ONNX graph output list from ``self.outputs_name``.

    Returns:
        list: one name-only ``onnx.ValueInfoProto`` per output name.
        Shapes and dtypes are intentionally left unset here; they are
        filled in by ``onnx.shape_inference.infer_shapes`` before the
        model is saved.
    """
    # Diff residue fixed: the old helper.make_tensor_value_info append
    # (which read the removed self.outputs_shape / self.outputs_dtype
    # attributes) duplicated every output; only the name-only form is kept.
    return [onnx.ValueInfoProto(name=name) for name in self.outputs_name]
......@@ -243,6 +233,7 @@ class ONNXConverter(object):
opset_imports = [helper.make_opsetid("", ver)]
model = helper.make_model(
graph, producer_name='onnx-example', opset_imports=opset_imports)
model = onnx.shape_inference.infer_shapes(model)
onnx.save(model,
os.path.join(self.pwd, self.name,
self.name + '_' + str(ver) + '.onnx'))
......@@ -261,13 +252,13 @@ class ONNXConverter(object):
onnx_res = {}
paddle_res = {}
# export onnx models and make onnx res
for v in self._version:
for v in range(self.min_opset_version, self.max_opset_version + 1):
self._mk_onnx_graph(ver=v)
self._onnx_to_paddle(ver=v)
onnx_res[str(v)] = self._mk_onnx_res(ver=v)
paddle_res[str(v)] = self._mk_paddle_res(ver=v)
for v in self._version:
for v in range(self.min_opset_version, self.max_opset_version + 1):
compare(
onnx_res[str(v)],
paddle_res[str(v)],
......
......@@ -25,9 +25,17 @@ import unittest
class TestConv2dConvert(OPConvertAutoScanTest):
"""
ONNX op: Conv
OPset version: 7, 9
OPset version: 7~15
"""
def add_ignore_pass_case(self, configs):
    """Decide whether a generated Conv config must be skipped.

    SAME_UPPER / SAME_LOWER auto_pad does not yet support dynamic
    shapes, so any config combining a "SAME" pad mode with a -1
    (dynamic) dimension in the first input shape is ignored.
    """
    config, attrs = configs
    uses_same_pad = "SAME" in attrs["auto_pad"]
    has_dynamic_dim = -1 in config["inputs_shape"][0]
    return uses_same_pad and has_dynamic_dim
def sample_convert_config(self, draw):
input_shape = draw(
st.lists(
......@@ -97,32 +105,13 @@ class TestConv2dConvert(OPConvertAutoScanTest):
"test_data_shapes": [input_shape, kernel_size],
"test_data_types": [['float32'], ['float32']],
"inputs_shape": [[-1, input_shape[1], -1, -1], kernel_size],
"outputs_shape": [[-1, kernel_size[0], -1, -1]],
"outputs_dtype": [['float32']],
"opset_version": [7, 9, 14],
"min_opset_version": 7,
"inputs_name": ["x", "W"],
"outputs_name": ["y"],
"delta": 1e-4,
"rtol": 1e-4
}
# Warning:
# 1、SAME_UPPER and SAME_LOWER does not yet support dynamic shapes
# 2、dilations only support 1
if "SAME" in auto_pad:
dilations = [1, 1]
config["inputs_shape"] = [input_shape, kernel_size]
config["outputs_shape"] = [[
input_shape[0], kernel_size[0],
int(input_shape[2] / strides[0]),
int(input_shape[3] / strides[1])
]]
if not isinstance(dilations, (tuple, list)):
dilations = [dilations]
if not isinstance(strides, (tuple, list)):
strides = [strides]
attrs = {
"auto_pad": auto_pad,
"dilations": dilations,
......@@ -132,10 +121,14 @@ class TestConv2dConvert(OPConvertAutoScanTest):
"strides": strides,
}
# if autopad equal SAME_UPPER and SAME_LOWER, dilations only support 1
if "SAME" in auto_pad:
attrs["dilations"] = [1, 1]
return (config, attrs)
def test(self):
    """Entry point: run the auto-scan over randomly generated Conv configs."""
    # Diff residue fixed: the block showed both the old (max_examples=30)
    # and new (max_examples=50) calls; only the updated single call is kept.
    self.run_and_statis(max_examples=50)
if __name__ == "__main__":
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册