未验证 提交 f3861400 编写于 作者: W WJJ1995 提交者: GitHub

Add logical and compare ops (#855)

* add logical ops

* add run_dynamic switch

* add Or and Xor

* add Compare ops

* fixed compare bug
上级 523162f2
...@@ -76,7 +76,7 @@ ...@@ -76,7 +76,7 @@
| 81 | Add | 82 | Concat | 83 | Max | 84 | Min | | 81 | Add | 82 | Concat | 83 | Max | 84 | Min |
| 85 | GreaterOrEqual | 86 | GatherND | 87 | And | 88 | cos | | 85 | GreaterOrEqual | 86 | GatherND | 87 | And | 88 | cos |
| 89 | Neg | 90 | SpaceToDepth | 91 | GatherElement | 92 | Sin | | 89 | Neg | 90 | SpaceToDepth | 91 | GatherElement | 92 | Sin |
| 93 | CumSum | | | | | | | | 93 | CumSum | 94 | Or | 95 | Xor | 96 | Mod |
## PyTorch ## PyTorch
......
...@@ -48,7 +48,7 @@ settings.register_profile( ...@@ -48,7 +48,7 @@ settings.register_profile(
derandomize=True, derandomize=True,
report_multiple_bugs=False) report_multiple_bugs=False)
if float(os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')) < 1 or \ if float(os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')) < 1 or \
os.getenv('HYPOTHESIS_TEST_PROFILE', 'dev') == 'ci': os.getenv('HYPOTHESIS_TEST_PROFILE', 'dev') == 'ci':
settings.load_profile("ci") settings.load_profile("ci")
else: else:
settings.load_profile("dev") settings.load_profile("dev")
...@@ -155,6 +155,7 @@ class OPConvertAutoScanTest(unittest.TestCase): ...@@ -155,6 +155,7 @@ class OPConvertAutoScanTest(unittest.TestCase):
# max_opset_version is a fixed value # max_opset_version is a fixed value
max_opset_version = 15 max_opset_version = 15
enable_onnx_checker = True enable_onnx_checker = True
run_dynamic = False
self.num_ran_tests += 1 self.num_ran_tests += 1
# add ignore testcases # add ignore testcases
...@@ -189,12 +190,14 @@ class OPConvertAutoScanTest(unittest.TestCase): ...@@ -189,12 +190,14 @@ class OPConvertAutoScanTest(unittest.TestCase):
max_opset_version = config["max_opset_version"] max_opset_version = config["max_opset_version"]
if "enable_onnx_checker" in config.keys(): if "enable_onnx_checker" in config.keys():
enable_onnx_checker = config["enable_onnx_checker"] enable_onnx_checker = config["enable_onnx_checker"]
if "run_dynamic" in config.keys():
run_dynamic = config["run_dynamic"]
for i in range(len(op_names)): for i in range(len(op_names)):
obj = ONNXConverter(op_names[i], min_opset_version[i], obj = ONNXConverter(op_names[i], min_opset_version[i],
max_opset_version, op_names[i], inputs_name, max_opset_version, op_names[i], inputs_name,
outputs_name, inputs_shape, delta, rtol, attrs, outputs_name, inputs_shape, delta, rtol, attrs,
enable_onnx_checker) enable_onnx_checker, run_dynamic)
for input_type in input_type_list: for input_type in input_type_list:
input_data = list() input_data = list()
for j, shape in enumerate(test_data_shapes): for j, shape in enumerate(test_data_shapes):
......
...@@ -60,7 +60,7 @@ def compare(result, expect, delta=1e-10, rtol=1e-10): ...@@ -60,7 +60,7 @@ def compare(result, expect, delta=1e-10, rtol=1e-10):
result.shape, expect.shape) result.shape, expect.shape)
assert result.dtype == expect.dtype, "result.dtype: {} != expect.dtype: {}".format( assert result.dtype == expect.dtype, "result.dtype: {} != expect.dtype: {}".format(
result.dtype, expect.dtype) result.dtype, expect.dtype)
elif isinstance(result, (list, tuple)) and len(result) > 1: elif isinstance(result, (list, tuple)):
for i in range(len(result)): for i in range(len(result)):
if isinstance(result[i], (np.generic, np.ndarray)): if isinstance(result[i], (np.generic, np.ndarray)):
compare(result[i], expect[i], delta, rtol) compare(result[i], expect[i], delta, rtol)
...@@ -69,6 +69,8 @@ def compare(result, expect, delta=1e-10, rtol=1e-10): ...@@ -69,6 +69,8 @@ def compare(result, expect, delta=1e-10, rtol=1e-10):
# deal with scalar tensor # deal with scalar tensor
elif len(expect) == 1: elif len(expect) == 1:
compare(result, expect[0], delta, rtol) compare(result, expect[0], delta, rtol)
else:
raise Exception("Compare diff wrong!!!!!!")
def randtool(dtype, low, high, shape): def randtool(dtype, low, high, shape):
...@@ -101,7 +103,8 @@ class ONNXConverter(object): ...@@ -101,7 +103,8 @@ class ONNXConverter(object):
delta=1e-5, delta=1e-5,
rtol=1e-5, rtol=1e-5,
attrs=[], attrs=[],
enable_onnx_checker=True): enable_onnx_checker=True,
run_dynamic=False):
self.op_type = op_type self.op_type = op_type
assert isinstance(self.op_type, assert isinstance(self.op_type,
str), "The dtype of op_type must be string!" str), "The dtype of op_type must be string!"
...@@ -124,6 +127,7 @@ class ONNXConverter(object): ...@@ -124,6 +127,7 @@ class ONNXConverter(object):
self.inputs_shape = inputs_shape self.inputs_shape = inputs_shape
self.attrs = attrs self.attrs = attrs
self.enable_onnx_checker = enable_onnx_checker self.enable_onnx_checker = enable_onnx_checker
self.run_dynamic = run_dynamic
def set_input_data(self, group_name, *args): def set_input_data(self, group_name, *args):
""" """
...@@ -182,17 +186,29 @@ class ONNXConverter(object): ...@@ -182,17 +186,29 @@ class ONNXConverter(object):
""" """
make paddle res make paddle res
""" """
paddle_path = os.path.join( # input data
self.pwd, self.name, paddle_numpy_feed = list()
self.name + '_' + str(ver) + '_paddle/inference_model/model') paddle_tensor_feed = list()
paddle.disable_static()
# run
model = paddle.jit.load(paddle_path)
paddle_feed = list()
for i in range(len(self.input_feed)): for i in range(len(self.input_feed)):
paddle_feed.append(self.input_feed[self.inputs_name[i]]) paddle_numpy_feed.append(self.input_feed[self.inputs_name[i]])
result = model(*paddle_feed) paddle_tensor_feed.append(
paddle.to_tensor(self.input_feed[self.inputs_name[i]]))
if self.run_dynamic:
paddle_path = os.path.join(self.pwd, self.name,
self.name + '_' + str(ver) + '_paddle/')
import sys
sys.path.append(paddle_path)
from x2paddle_code import main
result = main(*paddle_tensor_feed)
else:
paddle_path = os.path.join(
self.pwd, self.name,
self.name + '_' + str(ver) + '_paddle/inference_model/model')
paddle.disable_static()
# run
model = paddle.jit.load(paddle_path)
result = model(*paddle_numpy_feed)
# get paddle outputs # get paddle outputs
if isinstance(result, (tuple, list)): if isinstance(result, (tuple, list)):
result = tuple(out.numpy() for out in result) result = tuple(out.numpy() for out in result)
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import OPConvertAutoScanTest
from hypothesis import reproduce_failure
from onnxbase import randtool
import hypothesis.strategies as st
import numpy as np
import unittest
# Minimum ONNX opset version required to export each compare op.
# GreaterOrEqual/LessOrEqual only exist from opset 12 onward.
min_opset_version_map = {
    "Greater": 7,
    "Less": 7,
    "GreaterOrEqual": 12,
    "LessOrEqual": 12,
}
class TestCompareopsConvert(OPConvertAutoScanTest):
    """
    Auto-scan conversion test for the ONNX compare ops
    (Greater, Less, GreaterOrEqual, LessOrEqual).
    OPset version: 7~15
    """

    def sample_convert_config(self, draw):
        """Draw one randomized conversion config.

        Returns a ``(config, attrs)`` tuple for the auto-scan harness.
        The second input shape is either broadcastable over the last
        axis of the first input, identical to it, or the scalar-like
        shape ``[1]``.
        """
        input1_shape = draw(
            st.lists(
                st.integers(
                    min_value=10, max_value=20), min_size=2, max_size=4))
        # Second input: broadcast over the last axis or full-shape match.
        if draw(st.booleans()):
            input2_shape = [input1_shape[-1]]
        else:
            input2_shape = input1_shape
        # Sometimes collapse the second input to a broadcastable [1].
        if draw(st.booleans()):
            input2_shape = [1]
        input_dtype = draw(st.sampled_from(["float32", "float64"]))
        op_names = ["Greater", "Less", "GreaterOrEqual", "LessOrEqual"]
        config = {
            "op_names": op_names,
            "test_data_shapes": [input1_shape, input2_shape],
            "test_data_types": [[input_dtype], [input_dtype]],
            "inputs_shape": [],
            # Per-op minimum opset (was a dead scalar placeholder that
            # got overwritten after the dict literal).
            "min_opset_version":
            [min_opset_version_map[op_name] for op_name in op_names],
            "inputs_name": ["x", "y"],
            "outputs_name": ["z"],
            "delta": 1e-4,
            "rtol": 1e-4,
            "run_dynamic": True,
        }
        attrs = {}
        return (config, attrs)

    def test(self):
        """Run the auto-scan conversion/compare loop."""
        self.run_and_statis(max_examples=30)
# Run the test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import OPConvertAutoScanTest
from hypothesis import reproduce_failure
from onnxbase import randtool
import hypothesis.strategies as st
import numpy as np
import unittest
class TestEqualConvert(OPConvertAutoScanTest):
    """
    Auto-scan conversion test for the ONNX Equal op.
    OPset version: 7~15
    """

    def sample_convert_config(self, draw):
        """Draw one randomized ``(config, attrs)`` pair for Equal."""
        shape_a = draw(
            st.lists(
                st.integers(
                    min_value=10, max_value=20), min_size=2, max_size=4))
        # Second operand broadcasts over the last axis or matches exactly.
        shape_b = [shape_a[-1]] if draw(st.booleans()) else shape_a
        # Occasionally degrade it to the broadcastable shape [1].
        if draw(st.booleans()):
            shape_b = [1]
        dtype = draw(st.sampled_from(["int32", "int64", "bool"]))
        config = {
            "op_names": ["Equal"],
            "test_data_shapes": [shape_a, shape_b],
            "test_data_types": [[dtype], [dtype]],
            "inputs_shape": [],
            "min_opset_version": 7,
            "inputs_name": ["x", "y"],
            "outputs_name": ["z"],
            "delta": 1e-4,
            "rtol": 1e-4,
            "run_dynamic": True,
        }
        return (config, {})

    def test(self):
        """Run the auto-scan conversion/compare loop."""
        self.run_and_statis(max_examples=30)
# Run the test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import OPConvertAutoScanTest
from hypothesis import reproduce_failure
from onnxbase import randtool
import hypothesis.strategies as st
import numpy as np
import unittest
# Minimum ONNX opset version required to export each logical op.
min_opset_version_map = {
    "And": 7,
    "Or": 7,
    "Xor": 7,
}
class TestLogicalopsConvert(OPConvertAutoScanTest):
    """
    Auto-scan conversion test for the ONNX logical ops (And, Or, Xor).
    OPset version: 7~15
    """

    def sample_convert_config(self, draw):
        """Draw one randomized conversion config.

        Returns a ``(config, attrs)`` tuple for the auto-scan harness.
        The second input shape is either broadcastable over the last
        axis of the first input, identical to it, or the scalar-like
        shape ``[1]``.
        """
        input1_shape = draw(
            st.lists(
                st.integers(
                    min_value=10, max_value=20), min_size=2, max_size=4))
        # Second input: broadcast over the last axis or full-shape match.
        if draw(st.booleans()):
            input2_shape = [input1_shape[-1]]
        else:
            input2_shape = input1_shape
        # Sometimes collapse the second input to a broadcastable [1].
        if draw(st.booleans()):
            input2_shape = [1]
        input_dtype = draw(st.sampled_from(["bool"]))
        op_names = ["And", "Or", "Xor"]
        config = {
            "op_names": op_names,
            "test_data_shapes": [input1_shape, input2_shape],
            "test_data_types": [[input_dtype], [input_dtype]],
            "inputs_shape": [],
            # Per-op minimum opset (was a dead scalar placeholder that
            # got overwritten after the dict literal).
            "min_opset_version":
            [min_opset_version_map[op_name] for op_name in op_names],
            "inputs_name": ["x", "y"],
            "outputs_name": ["z"],
            "delta": 1e-4,
            "rtol": 1e-4,
            "run_dynamic": True,
        }
        attrs = {}
        return (config, attrs)

    def test(self):
        """Run the auto-scan conversion/compare loop."""
        self.run_and_statis(max_examples=30)
# Run the test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
...@@ -33,6 +33,26 @@ class OpSet7(OpSet): ...@@ -33,6 +33,26 @@ class OpSet7(OpSet):
def __init__(self, decoder, paddle_graph): def __init__(self, decoder, paddle_graph):
super(OpSet7, self).__init__(decoder, paddle_graph) super(OpSet7, self).__init__(decoder, paddle_graph)
@print_mapping_info
def Or(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
self.paddle_graph.add_layer(
"paddle.logical_or",
inputs={"x": val_x.name,
"y": val_y.name},
outputs=[node.name])
@print_mapping_info
def Xor(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
self.paddle_graph.add_layer(
"paddle.logical_xor",
inputs={"x": val_x.name,
"y": val_y.name},
outputs=[node.name])
@print_mapping_info @print_mapping_info
def Unsqueeze(self, node): def Unsqueeze(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True) val_x = self.graph.get_input_node(node, idx=0, copy=True)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册