Unverified commit 9aa82278, authored by 张经纬, committed by GitHub

[CodeStyle][CINN] format python code via black (#54964)

Parent: 21518d2f
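black rewrites layout without changing behavior. As orientation for the hunks below, a minimal before/after sketch (hypothetical snippet, not taken from this diff) of the two rewrites that dominate the commit: an over-long argument list is exploded one argument per line with a trailing comma, and '#comment' gains a space after the hash.

# Hypothetical illustration of black's output style; short code is left alone.
def describe(name, shape, dtype, value):
    # Before black this call was crammed onto one over-long line; after black,
    # one argument per line, trailing comma, closing paren dedented:
    return "{}, shape={}, dtype={}:\n{}".format(
        name,
        shape,
        dtype,
        value,
    )


print(describe("x", (2, 3), "float32", 0))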
@@ -25,7 +25,7 @@ def main():
     srcs = []
     srcs.append('#include <absl/strings/string_view.h>')
-    #srcs.append('#include "paddle/cinn/backends/llvm/cinn_runtime_llvm_ir.h"\n')
+    # srcs.append('#include "paddle/cinn/backends/llvm/cinn_runtime_llvm_ir.h"\n')
     srcs.append('namespace cinn::backends {')
     srcs.append("static const absl::string_view kRuntimeLlvmIr(")
     srcs.append('R"ROC(')
@@ -36,12 +36,19 @@ def main():
     srcs.append(');\n')
     cmd = "{} --version".format(llvm_config)
-    version = subprocess.check_output(
-        cmd, shell=True).decode('utf-8').strip().split('.')
+    version = (
+        subprocess.check_output(cmd, shell=True)
+        .decode('utf-8')
+        .strip()
+        .split('.')
+    )
     srcs.append("struct llvm_version {")
     for v, n in zip(["major", "minor", "micro"], version):
-        srcs.append(" static constexpr int k{} = {};".format(
-            v.title(), ''.join(filter(str.isdigit, n))))
+        srcs.append(
+            " static constexpr int k{} = {};".format(
+                v.title(), ''.join(filter(str.isdigit, n))
+            )
+        )
     srcs.append("};")
     srcs.append('} // namespace cinn::backends')
...
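The reformatted chain above shells out to llvm-config and splits its dotted version string. A minimal standalone sketch of the same logic (assumes an llvm-config binary on PATH that prints something like "15.0.7"):

import subprocess

out = subprocess.check_output("llvm-config --version", shell=True)
version = out.decode('utf-8').strip().split('.')  # e.g. ['15', '0', '7']
for v, n in zip(["major", "minor", "micro"], version):
    # keep digits only, so a suffix such as "7git" still yields "7"
    print(" static constexpr int k{} = {};".format(
        v.title(), ''.join(filter(str.isdigit, n))
    ))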
@@ -13,6 +13,7 @@
 # limitations under the License.
 import os
+
 cinndir = os.path.dirname(os.path.abspath(__file__))
 runtime_include_dir = os.path.join(cinndir, "libs")
 cuhfile = os.path.join(runtime_include_dir, "cinn_cuda_runtime_source.cuh")
...
@@ -48,7 +48,8 @@ class XgbCostModel(object):
         samples = np.concatenate(samples, axis=0)
         if isinstance(labels, list):
             labels = np.concatenate(
-                [[y] * length for y, length in zip(labels, lengths)], axis=0)
+                [[y] * length for y, length in zip(labels, lengths)], axis=0
+            )
         dmatrix = xgb.DMatrix(data=samples, label=labels)
         self.booster = xgb.train(self.xgb_param, dmatrix, self.train_round)
@@ -77,7 +78,9 @@ class XgbCostModel(object):
         Args:
             path(str): path to save
         """
-        assert self.booster is not None, "Calling save on a XgbCostModel not been trained"
+        assert (
+            self.booster is not None
+        ), "Calling save on a XgbCostModel not been trained"
         self.booster.save_model(path)

     def load(self, path):
@@ -94,5 +97,5 @@ class XgbCostModel(object):
     # But we should do that here if that's changable in the future.
     def update(self, samples, labels):
-        #xgb doesn't support incremental training, we leave this method as TODO
+        # xgb doesn't support incremental training, we leave this method as TODO
         pass
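For reference, the save/load pair reformatted above delegates to xgboost's Booster. A minimal sketch of that round-trip (assumes numpy and xgboost are installed; the parameter values are illustrative, not the ones CINN uses):

import numpy as np
import xgboost as xgb

X = np.random.rand(16, 4)
y = np.random.rand(16)
dmatrix = xgb.DMatrix(data=X, label=y)              # as in train() above
booster = xgb.train({"max_depth": 2}, dmatrix, 10)  # (xgb_param, dmatrix, train_round)
booster.save_model("cost_model.json")               # what save(path) wraps
restored = xgb.Booster()
restored.load_model("cost_model.json")              # what load(path) wraps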
@@ -1032,13 +1032,15 @@ def get_package_data_and_package_dir():
         package_data['paddle.libs'] += ['cinn_cuda_runtime_source.cuh']
         cinn_fp16_file = (
-            env_dict.get("CINN_INCLUDE_DIR") + '/paddle/cinn/runtime/cuda/float16.h'
+            env_dict.get("CINN_INCLUDE_DIR")
+            + '/paddle/cinn/runtime/cuda/float16.h'
         )
         if os.path.exists(cinn_fp16_file):
             shutil.copy(cinn_fp16_file, libs_path)
             package_data['paddle.libs'] += ['float16.h']
         cinn_bf16_file = (
-            env_dict.get("CINN_INCLUDE_DIR") + '/paddle/cinn/runtime/cuda/bfloat16.h'
+            env_dict.get("CINN_INCLUDE_DIR")
+            + '/paddle/cinn/runtime/cuda/bfloat16.h'
         )
         if os.path.exists(cinn_bf16_file):
             shutil.copy(cinn_bf16_file, libs_path)
...
@@ -51,13 +51,15 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):
             cin_index = 3
             filter_size_new = [
                 filter_size[1] * input_shape[cin_index],
-                filter_size[0] // groups, filter_size[2], filter_size[3]
+                filter_size[0] // groups,
+                filter_size[2],
+                filter_size[3],
             ]
         else:
             filter_size_new = filter_size
         param = paddle.nn.initializer.NumpyArrayInitializer(
-            np.array(
-                inputs_data[1]).reshape(filter_size_new).astype("float32"))
+            np.array(inputs_data[1]).reshape(filter_size_new).astype("float32")
+        )
         # filter: (c_out, c_in // group, kernel_h, kernel_w)
         filter_hw = list(filter_size_new[2:4])
         if data_format == "NHWC":
@@ -78,7 +80,8 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):
         dilation=dilation,
         groups=groups,
         param_attr=param,
-        data_format=data_format)
+        data_format=data_format,
+    )
     exe = static.Executor(paddle.CPUPlace())
     exe.run(static.default_startup_program())
...
@@ -26,7 +26,8 @@ a = static.data(name="A", shape=[-1, size], dtype='float32')
 label = static.data(name="label", shape=[size], dtype='float32')
 a1 = static.nn.fc(
-    x=a, size=size, activation="relu", bias_attr=None, num_flatten_dims=1)
+    x=a, size=size, activation="relu", bias_attr=None, num_flatten_dims=1
+)
 cost = paddle.nn.functional.square_error_cost(a1, label)
 avg_cost = paddle.mean(cost)
...
@@ -33,7 +33,8 @@ fc_out = static.nn.fc(
     size=size,
     activation="relu",
     bias_attr=paddle.ParamAttr(name="fc_bias"),
-    num_flatten_dims=1)
+    num_flatten_dims=1,
+)
 for i in range(num_layers - 1):
     fc_out = static.nn.fc(
@@ -41,7 +42,8 @@ for i in range(num_layers - 1):
         size=size,
         activation="relu",
         bias_attr=paddle.ParamAttr(name="fc_bias"),
-        num_flatten_dims=1)
+        num_flatten_dims=1,
+    )
 cost = paddle.nn.functional.square_error_cost(fc_out, label)
 avg_cost = paddle.mean(cost)
...
@@ -22,15 +22,19 @@ import paddle.static as static
 paddle.enable_static()
 resnet_input = static.data(
-    name="resnet_input", shape=[1, 160, 7, 7], dtype='float32')
+    name="resnet_input", shape=[1, 160, 7, 7], dtype='float32'
+)
 label = static.data(name="label", shape=[1, 960, 7, 7], dtype='float32')
 d = paddle.nn.functional.relu6(resnet_input)
 f = static.nn.conv2d(
-    input=d, num_filters=960, filter_size=1, stride=1, padding=0, dilation=1)
+    input=d, num_filters=960, filter_size=1, stride=1, padding=0, dilation=1
+)
 g = static.nn.conv2d(
-    input=f, num_filters=160, filter_size=1, stride=1, padding=0, dilation=1)
+    input=f, num_filters=160, filter_size=1, stride=1, padding=0, dilation=1
+)
 i = static.nn.conv2d(
-    input=g, num_filters=960, filter_size=1, stride=1, padding=0, dilation=1)
+    input=g, num_filters=960, filter_size=1, stride=1, padding=0, dilation=1
+)
 j1 = paddle.scale(i, scale=2.0, bias=0.5)
 j = paddle.scale(j1, scale=2.0, bias=0.5)
 temp7 = paddle.nn.functional.relu(j)
@@ -46,6 +50,7 @@ exe = static.Executor(cpu)
 exe.run(static.default_startup_program())
-fluid.io.save_inference_model("./resnet_model", [resnet_input.name], [temp7],
-                              exe)
+fluid.io.save_inference_model(
+    "./resnet_model", [resnet_input.name], [temp7], exe
+)
 print('res', temp7.name)
@@ -25,38 +25,44 @@ class FusionTest(PassTest):
         super(FusionTest, self).__init__(*args, **kwargs)

     def init_input_data(self):
-        """Set feed data
-        """
+        """Set feed data"""
         self.feed_data = dict()
         logger.warn("No Input Data")

     def build_program(self, builder, target):
-        """
-        """
+        """ """
         raise Exception("Not implemented.")

-    def check_fusion_outputs(self,
-                             group_size,
-                             max_relative_error=1e-5,
-                             all_equal=False,
-                             equal_nan=False):
+    def check_fusion_outputs(
+        self,
+        group_size,
+        max_relative_error=1e-5,
+        all_equal=False,
+        equal_nan=False,
+    ):
         base_passes = ["AutoCast", "Decomposer", "TransToCustomCallPass"]
         fusion_passes = ["OpFusionPass", "FusionMergePass"]
         real_group_size = self.get_pass_size(base_passes + fusion_passes)
         logger.debug(
-            "The model has been fused into {} groups".format(real_group_size))
+            "The model has been fused into {} groups".format(real_group_size)
+        )
         self.assertEqual(
             real_group_size,
             group_size,
-            msg=
-            "The model should be fused into {} groups, but actually fused {} groups"
-            .format(group_size, real_group_size))
+            msg="The model should be fused into {} groups, but actually fused {} groups".format(
+                group_size, real_group_size
+            ),
+        )
         cinn_no_fusion_outputs = self.get_pass_outputs(base_passes)
-        cinn_fusion_outputs = self.get_pass_outputs(base_passes +
-                                                    fusion_passes)
+        cinn_fusion_outputs = self.get_pass_outputs(base_passes + fusion_passes)
         logger.debug("============ Check Outputs ============")
-        self.check_results(cinn_no_fusion_outputs, cinn_fusion_outputs,
-                           max_relative_error, all_equal, equal_nan)
+        self.check_results(
+            cinn_no_fusion_outputs,
+            cinn_fusion_outputs,
+            max_relative_error,
+            all_equal,
+            equal_nan,
+        )
@@ -25,12 +25,15 @@ class TestGroup1(FusionTest):
     def build_program(self, builder, target):
         eager_in_tmp_8 = builder.create_input(
             self.nptype2cinntype(self.feed_data['eager_in_tmp_8'].dtype),
-            self.feed_data['eager_in_tmp_8'].shape, "eager_in_tmp_8")
+            self.feed_data['eager_in_tmp_8'].shape,
+            "eager_in_tmp_8",
+        )
         var_15 = builder.cast(eager_in_tmp_8, dtype="float16")
         # cast should not fused into reduce when the output need fetch
         var_73 = builder.broadcast_to(
-            var_15, broadcast_axes=[0, 1, 2, 3], out_shape=[32, 12, 128, 128])
+            var_15, broadcast_axes=[0, 1, 2, 3], out_shape=[32, 12, 128, 128]
+        )
         var_55 = builder.cast(var_73, dtype="float32")
         var_76 = builder.reduce_max(var_55, dim=[3], keep_dim=False)
@@ -49,12 +52,15 @@ class TestGroup2(FusionTest):
     def build_program(self, builder, target):
         eager_in_tmp_8 = builder.create_input(
             self.nptype2cinntype(self.feed_data['eager_in_tmp_8'].dtype),
-            self.feed_data['eager_in_tmp_8'].shape, "eager_in_tmp_8")
+            self.feed_data['eager_in_tmp_8'].shape,
+            "eager_in_tmp_8",
+        )
         var_15 = builder.cast(eager_in_tmp_8, dtype="float16")
         # cast should fused into reduce when the output not fetched
         var_73 = builder.broadcast_to(
-            var_15, broadcast_axes=[0, 1, 2, 3], out_shape=[32, 12, 128, 128])
+            var_15, broadcast_axes=[0, 1, 2, 3], out_shape=[32, 12, 128, 128]
+        )
         var_55 = builder.cast(var_73, dtype="float32")
         var_76 = builder.reduce_max(var_55, dim=[3], keep_dim=False)
...
@@ -22,9 +22,11 @@ class TestGroup1(FusionTest):
     def build_program(self, builder, target):
         x = builder.fill_constant(
-            dtype="float32", shape=[4, 5, 20, 20], value=1.00000000)
+            dtype="float32", shape=[4, 5, 20, 20], value=1.00000000
+        )
         y = builder.cast(
-            builder.reduce_sum(x, dim=[2], keep_dim=False), "float16")
+            builder.reduce_sum(x, dim=[2], keep_dim=False), "float16"
+        )
         feed_list = []
         fetch_list = [y]
...
@@ -27,13 +27,19 @@ class TestGroup1(FusionTest):
     def build_program(self, builder, target):
         cond = builder.create_input(
             self.nptype2cinntype(self.feed_data['cond'].dtype),
-            self.feed_data['cond'].shape, "cond")
+            self.feed_data['cond'].shape,
+            "cond",
+        )
         true_value = builder.create_input(
             self.nptype2cinntype(self.feed_data['true_value'].dtype),
-            self.feed_data['true_value'].shape, "true_value")
+            self.feed_data['true_value'].shape,
+            "true_value",
+        )
         false_value = builder.create_input(
             self.nptype2cinntype(self.feed_data['false_value'].dtype),
-            self.feed_data['false_value'].shape, "false_value")
+            self.feed_data['false_value'].shape,
+            "false_value",
+        )
         var_1 = builder.select(cond, true_value, false_value)
         var_2 = builder.reduce_sum(var_1, dim=[2], keep_dim=False)
...
@@ -27,6 +27,7 @@ from cinn.common import is_compiled_with_cuda
 from cinn.framework import Scope
 import sys
+
 sys.path.append("/work/dev_CINN/build/python/tests")
 from test.cinn.ops.op_test import OpTest, OpTestTool
@@ -76,7 +77,7 @@ class OpMapperTest(OpTest):
             x1 = paddle.static.data(name='x1', shape=[1, 2], dtype='float32')
             x2 = paddle.static.data(name='x2', shape=[1, 2], dtype='float32')
             return {'X' : [x1, x2]}
-        ``` """
+        ```"""
         return dict()

     def set_op_attrs(self) -> dict:
@@ -136,29 +137,31 @@ class OpMapperTest(OpTest):
     def __check_valid(self):
         self.assertIsInstance(
-            self.op_type, str, msg="The op type should be a string")
+            self.op_type, str, msg="The op type should be a string"
+        )
         self.assertNotEqual(
-            self.op_type, "", msg="The op type should not empty")
+            self.op_type, "", msg="The op type should not empty"
+        )
         self.assertIsInstance(
             self.inputs,
             dict,
-            msg=
-            "The set_op_inputs should be return dict(InputName, list(Variable)), where Variable are created by paddle.static.data"
+            msg="The set_op_inputs should be return dict(InputName, list(Variable)), where Variable are created by paddle.static.data",
         )
         self.assertIsInstance(
             self.attrs,
             dict,
-            msg="The set_op_attrs should be return dict(AttrName, AttrValue)")
+            msg="The set_op_attrs should be return dict(AttrName, AttrValue)",
+        )
         self.assertIsInstance(
             self.output_dtypes,
             dict,
-            msg=
-            "The set_op_outputs should be return dict(OutName, list(OutDtype)), where OutName and OutDtype are string"
+            msg="The set_op_outputs should be return dict(OutName, list(OutDtype)), where OutName and OutDtype are string",
        )
         self.assertGreater(
             len(self.output_dtypes),
             0,
-            msg="The set_op_outputs cannot return a empty dict")
+            msg="The set_op_outputs cannot return a empty dict",
+        )
         for name, var in self.input_arg_map.items():
             self.assertIn(name, self.feed_data)
@@ -166,24 +169,30 @@ class OpMapperTest(OpTest):
                 var.shape,
                 self.feed_data[name].shape,
                 msg="The shape of input {} in feed_data is error".format(
-                    var.name))
+                    var.name
+                ),
+            )
             self.assertEqual(
                 self.paddleddtype2nptype(var.dtype),
                 str(self.feed_data[name].dtype),
                 msg="The dtype of input {} in feed_data is error".format(
-                    var.name))
+                    var.name
+                ),
+            )
         for out_name, in_name in self.inplace_outputs.items():
             self.assertNotIn(
                 out_name,
                 self.output_dtypes,
-                msg=
-                "The {} should not declare twice because it's a inplace output, you should remove it from \"set_op_outputs\""
-                .format(out_name))
+                msg="The {} should not declare twice because it's a inplace output, you should remove it from \"set_op_outputs\"".format(
+                    out_name
+                ),
+            )
             self.assertIn(
                 in_name,
                 self.inputs,
-                msg="The inplace var should existed in op' inputs dict")
+                msg="The inplace var should existed in op' inputs dict",
+            )

     def __get_arguments_map(self, param_maps):
         arg_maps = dict()
@@ -191,18 +200,18 @@ class OpMapperTest(OpTest):
             self.assertIsInstance(
                 args,
                 list,
-                msg=
-                "The type of arguments should be list(Variable), where Variable are created by paddle.static.data"
+                msg="The type of arguments should be list(Variable), where Variable are created by paddle.static.data",
             )
             for var in args:
                 self.assertIsInstance(
                     var,
                     PaddleVariable,
-                    msg="The type of argument should be paddle.static.Variable"
+                    msg="The type of argument should be paddle.static.Variable",
                 )
                 self.assertTrue(
                     (var.name not in arg_maps) or (arg_maps[var.name] == var),
-                    msg="Argument %s is duplicated" % var.name)
+                    msg="Argument %s is duplicated" % var.name,
+                )
                 arg_maps[var.name] = var
         return arg_maps
@@ -215,9 +224,14 @@ class OpMapperTest(OpTest):
         for i in range(len(self.fetch_targets)):
             if self.fetch_targets[i].name not in self.skip_check_list:
                 check_outputs.append(results[i])
-                logger.debug(msg="{}, shape={}, dtype={}:\n{}".format(
-                    self.fetch_targets[i].name, results[i].shape,
-                    str(results[i].dtype), results[i]))
+                logger.debug(
+                    msg="{}, shape={}, dtype={}:\n{}".format(
+                        self.fetch_targets[i].name,
+                        results[i].shape,
+                        str(results[i].dtype),
+                        results[i],
+                    )
+                )
         return check_outputs
@@ -225,8 +239,14 @@ class OpMapperTest(OpTest):
         if logger.isEnabledFor(logging.DEBUG):
             debug_info = ""
             for k, v in info_dict.items():
-                debug_info += k + ", shape=" + str(v.shape) + ", dtype=" + str(
-                    v.dtype) + ":\n"
+                debug_info += (
+                    k
+                    + ", shape="
+                    + str(v.shape)
+                    + ", dtype="
+                    + str(v.dtype)
+                    + ":\n"
+                )
                 debug_info += str(v) + "\n"
             logger.debug(title + ":\n" + debug_info)
@@ -245,8 +265,7 @@ class OpMapperTest(OpTest):
             self.assertIsInstance(
                 dtypes,
                 list,
-                msg=
-                "The set_op_outputs should be return dict(OutName, list(OutDtype)), where OutName and OutDtype are string"
+                msg="The set_op_outputs should be return dict(OutName, list(OutDtype)), where OutName and OutDtype are string",
             )
             self.outputs[var_name] = list()
             for dtype in dtypes:
@@ -268,7 +287,8 @@ class OpMapperTest(OpTest):
                 type=self.op_type,
                 inputs=self.inputs,
                 outputs=self.outputs,
-                attrs=self.attrs).desc
+                attrs=self.attrs,
+            ).desc
         logger.debug("Paddle Program:\n" + str(main_program))
@@ -279,7 +299,8 @@ class OpMapperTest(OpTest):
             main_program,
             self.feed_data,
             fetch_list=self.fetch_targets,
-            return_numpy=True)
+            return_numpy=True,
+        )
         # NOTE: The unittest of `test_reduce_op`, `test_argmax_op`, `test_argmin_op` will
         # output 0D-Tensor, hence we need to reshape them into 1D-Tensor temporarily.
@@ -299,13 +320,15 @@ class OpMapperTest(OpTest):
             convertor.create_input(
                 dtype=self.paddleddtype2nptype(var.dtype),
                 shape=var.shape,
-                name=var_name)
+                name=var_name,
+            )
         convertor.append_op(
             type=self.op_type,
             inputs=self.op_desc.inputs(),
             outputs=self.op_desc.outputs(),
-            attrs=self.attrs)
+            attrs=self.attrs,
+        )
         prog = convertor()
@@ -326,8 +349,10 @@ class OpMapperTest(OpTest):
             self.assertIn(
                 cinn_name,
                 vars,
-                msg="Cannot find variable " + cinn_name +
-                " in cinn program's var list")
+                msg="Cannot find variable "
+                + cinn_name
+                + " in cinn program's var list",
+            )
             cinn_inputs.append(vars[cinn_name])
             cinn_feed_datas.append(self.feed_data[name])
@@ -348,7 +373,8 @@ class OpMapperTest(OpTest):
         # map the name the variable
         self.assertGreater(
-            len(fetch_names), 0, msg="The program's output cannot be empty!")
+            len(fetch_names), 0, msg="The program's output cannot be empty!"
+        )
         cinn_output_vars = list()
         for name in fetch_names:
             cinn_name = convertor.get_cinn_name(name)
@@ -356,8 +382,10 @@ class OpMapperTest(OpTest):
             self.assertIn(
                 cinn_name,
                 vars,
-                msg="Cannot find variable " + cinn_name +
-                " in cinn program's var list")
+                msg="Cannot find variable "
+                + cinn_name
+                + " in cinn program's var list",
+            )
             cinn_output_vars.append(vars[cinn_name])
         # run and get result
@@ -368,7 +396,8 @@ class OpMapperTest(OpTest):
             cinn_feed_datas,
             cinn_output_vars,
             passes=list(),
-            scope=scope)
+            scope=scope,
+        )
         logger.debug(msg="CINN result:")
         self.cinn_outputs = self.__remove_skip_outputs(results)
...
@@ -40,7 +40,8 @@ class TestArgmaxOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}

     def set_op_attrs(self):
@@ -48,7 +49,7 @@ class TestArgmaxOp(OpMapperTest):
             "axis": self.axis,
             "flatten": self.flatten,
             "keepdims": self.keepdims,
-            "dtype": self.nptype2paddledtype(self.output_dtype)
+            "dtype": self.nptype2paddledtype(self.output_dtype),
         }

     def set_op_outputs(self):
@@ -77,7 +78,7 @@ class TestArgmaxCase1(TestArgmaxOp):
 class TestArgmaxCase2(TestArgmaxOp):
     """
     Test case with true keepdims
     """

     def init_input_data(self):
...
@@ -40,7 +40,8 @@ class TestArgminOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}

     def set_op_attrs(self):
@@ -48,7 +49,7 @@ class TestArgminOp(OpMapperTest):
             "axis": self.axis,
             "flatten": self.flatten,
             "keepdims": self.keepdims,
-            "dtype": self.nptype2paddledtype(self.output_dtype)
+            "dtype": self.nptype2paddledtype(self.output_dtype),
         }

     def set_op_outputs(self):
@@ -77,7 +78,7 @@ class TestArgminCase1(TestArgminOp):
 class TestArgminCase2(TestArgminOp):
     """
     Test case with true keepdims
     """

     def init_input_data(self):
...
@@ -34,7 +34,8 @@ class TestArgSortOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}

     def set_op_attrs(self):
...
@@ -44,8 +44,7 @@ class TestAssignValueOp(OpMapperTest):
         else:
             self.assertTrue(
                 False,
-                msg=
-                "The data type of 'input' must be bool, float32, int32 or int64"
+                msg="The data type of 'input' must be bool, float32, int32 or int64",
             )

     def set_op_attrs(self):
@@ -54,7 +53,8 @@ class TestAssignValueOp(OpMapperTest):
         value_name, values = self.convert_values(dtype, self.feed_data['x'])
         return {
             'dtype': self.nptype2paddledtype(
-                str(dtype)),  # should keep the same as input
+                str(dtype)
+            ),  # should keep the same as input
             'shape': shape,
             value_name: values,
         }
...
@@ -34,11 +34,13 @@ class TestAtan2Op(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         y = paddle.static.data(
             name='y',
             shape=self.feed_data['y'].shape,
-            dtype=self.feed_data['y'].dtype)
+            dtype=self.feed_data['y'].dtype,
+        )
         return {'X1': [x], 'X2': [y]}

     def set_op_attrs(self):
...
@@ -38,29 +38,34 @@ class TestBatchNormOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         scale = paddle.static.data(
             name='scale',
             shape=self.feed_data['scale'].shape,
-            dtype=self.feed_data['scale'].dtype)
+            dtype=self.feed_data['scale'].dtype,
+        )
         bias = paddle.static.data(
             name='bias',
             shape=self.feed_data['bias'].shape,
-            dtype=self.feed_data['bias'].dtype)
+            dtype=self.feed_data['bias'].dtype,
+        )
         mean = paddle.static.data(
             name='mean',
             shape=self.feed_data['mean'].shape,
-            dtype=self.feed_data['mean'].dtype)
+            dtype=self.feed_data['mean'].dtype,
+        )
         variance = paddle.static.data(
             name='variance',
             shape=self.feed_data['variance'].shape,
-            dtype=self.feed_data['variance'].dtype)
+            dtype=self.feed_data['variance'].dtype,
+        )
         return {
             'X': [x],
             'Scale': [scale],
             'Bias': [bias],
             'Mean': [mean],
-            'Variance': [variance]
+            'Variance': [variance],
         }

     def set_op_attrs(self):
@@ -70,7 +75,7 @@ class TestBatchNormOp(OpMapperTest):
             'data_layout': 'NCHW',
             'is_test': False,
             'trainable_statistics': False,
-            'use_global_stats': False
+            'use_global_stats': False,
         }

     def set_op_outputs(self):
@@ -99,7 +104,7 @@ class TestBatchNormInferOp(TestBatchNormOp):
             'data_layout': 'NCHW',
             'is_test': True,
             'trainable_statistics': False,
-            'use_global_stats': False
+            'use_global_stats': False,
         }

     def skip_check_outputs(self):
...
@@ -34,11 +34,13 @@ class TestBitwiseOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         y = paddle.static.data(
             name='y',
             shape=self.feed_data['y'].shape,
-            dtype=self.feed_data['y'].dtype)
+            dtype=self.feed_data['y'].dtype,
+        )
         return {'X': [x], 'Y': [y]}

     def set_op_attrs(self):
@@ -74,7 +76,8 @@ class TestBitwiseNotOp(TestBitwiseOp):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}

     def test_check_results(self):
...
@@ -34,7 +34,8 @@ class TestCholeskyOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}

     def set_op_attrs(self):
...
@@ -35,7 +35,8 @@ class TestClipOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}

     def set_op_attrs(self):
@@ -115,7 +116,7 @@ class TestClipOpMaxTensor(TestClipOp):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([2, 3, 4], "float32", -1.0, 1.0),
-            'max_input': self.random([1], "float32")
+            'max_input': self.random([1], "float32"),
         }
         self.min_val = -random.random()
         self.max_val = random.random()
@@ -124,11 +125,13 @@ class TestClipOpMaxTensor(TestClipOp):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         max_input = paddle.static.data(
             name='max_input',
             shape=self.feed_data['max_input'].shape,
-            dtype=self.feed_data['max_input'].dtype)
+            dtype=self.feed_data['max_input'].dtype,
+        )
         return {'X': [x], 'Max': [max_input]}
@@ -136,7 +139,7 @@ class TestClipOpMaxTensorInt32(TestClipOpMaxTensor):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([2, 3, 4], "int32"),
-            'max_input': self.random([1], "int32")
+            'max_input': self.random([1], "int32"),
         }
         self.min_val = -random.random()
         self.max_val = random.random()
@@ -146,7 +149,7 @@ class TestClipOpMaxTensorFloat64(TestClipOpMaxTensor):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([2, 3, 4], "float64"),
-            'max_input': self.random([1], "float64")
+            'max_input': self.random([1], "float64"),
         }
         self.min_val = -random.random()
         self.max_val = random.random()
@@ -156,7 +159,7 @@ class TestClipOpMaxTensorTypeCast(TestClipOpMaxTensor):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([2, 3, 4], "float64"),
-            'max_input': self.random([1], "float32")
+            'max_input': self.random([1], "float32"),
         }
         self.min_val = -random.random()
         self.max_val = random.random()
@@ -166,7 +169,7 @@ class TestClipOpMinTensor(TestClipOp):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([2, 3, 4], "float32"),
-            'min_input': self.random([1], "float32")
+            'min_input': self.random([1], "float32"),
         }
         self.min_val = -random.random()
         self.max_val = random.random()
@@ -175,11 +178,13 @@ class TestClipOpMinTensor(TestClipOp):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         min_input = paddle.static.data(
             name='min_input',
             shape=self.feed_data['min_input'].shape,
-            dtype=self.feed_data['min_input'].dtype)
+            dtype=self.feed_data['min_input'].dtype,
+        )
         return {'X': [x], 'Min': [min_input]}

     def set_op_attrs(self):
@@ -190,7 +195,7 @@ class TestClipOpMinTensorInt32(TestClipOpMinTensor):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([2, 3, 4], "int32"),
-            'min_input': self.random([1], "int32")
+            'min_input': self.random([1], "int32"),
         }
         self.min_val = -random.random()
         self.max_val = random.random()
@@ -200,7 +205,7 @@ class TestClipOpMinTensorFloat64(TestClipOpMinTensor):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([2, 3, 4], "float64"),
-            'min_input': self.random([1], "float64")
+            'min_input': self.random([1], "float64"),
         }
         self.min_val = -random.random()
         self.max_val = random.random()
@@ -210,7 +215,7 @@ class TestClipOpMinTensorTypeCast(TestClipOpMinTensor):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([2, 3, 4], "float64"),
-            'min_input': self.random([1], "float32")
+            'min_input': self.random([1], "float32"),
         }
         self.min_val = -random.random()
         self.max_val = random.random()
...
@@ -34,11 +34,13 @@ class TestCompareOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         y = paddle.static.data(
             name='y',
             shape=self.feed_data['y'].shape,
-            dtype=self.feed_data['y'].dtype)
+            dtype=self.feed_data['y'].dtype,
+        )
         return {'X': [x], 'Y': [y]}

     def set_op_attrs(self):
...
@@ -24,7 +24,7 @@ class TestConv2dOp(OpMapperTest):
     def init_input_data(self):
         self.feed_data = {
             "x": self.random([3, 16, 32, 32], "float32"),
-            "weight": self.random([16, 16, 3, 3], "float32")
+            "weight": self.random([16, 16, 3, 3], "float32"),
         }
         self.data_format = 'NCHW'
@@ -32,10 +32,14 @@ class TestConv2dOp(OpMapperTest):
         return "conv2d"

     def set_op_inputs(self):
-        x = paddle.static.data('x', self.feed_data["x"].shape,
-                               self.feed_data["x"].dtype)
-        weight = paddle.static.data('weight', self.feed_data["weight"].shape,
-                                    self.feed_data["weight"].dtype)
+        x = paddle.static.data(
+            'x', self.feed_data["x"].shape, self.feed_data["x"].dtype
+        )
+        weight = paddle.static.data(
+            'weight',
+            self.feed_data["weight"].shape,
+            self.feed_data["weight"].dtype,
+        )
         return {'Input': [x], 'Filter': [weight]}

     def set_op_attrs(self):
@@ -46,7 +50,7 @@ class TestConv2dOp(OpMapperTest):
             "groups": 1,
             "data_format": self.data_format,
             "padding_algorithm": "EXPLICIT",
-            "use_cudnn": True
+            "use_cudnn": True,
         }

     def set_op_outputs(self):
@@ -60,7 +64,7 @@ class TestConv2dNCHWFP16(TestConv2dOp):
     def init_input_data(self):
         self.feed_data = {
             "x": self.random([3, 16, 32, 32], "float16"),
-            "weight": self.random([16, 16, 3, 3], "float16")
+            "weight": self.random([16, 16, 3, 3], "float16"),
         }
         self.data_format = 'NCHW'
@@ -72,7 +76,7 @@ class TestConv2dNHWC(TestConv2dOp):
     def init_input_data(self):
         self.feed_data = {
             "x": self.random([3, 32, 32, 16], "float32"),
-            "weight": self.random([16, 16, 3, 3], "float32")
+            "weight": self.random([16, 16, 3, 3], "float32"),
         }
         self.data_format = 'NHWC'
@@ -81,7 +85,7 @@ class TestConv2dNHWCFP16(TestConv2dOp):
     def init_input_data(self):
         self.feed_data = {
             "x": self.random([3, 32, 32, 16], "float16"),
-            "weight": self.random([16, 16, 3, 3], "float16")
+            "weight": self.random([16, 16, 3, 3], "float16"),
         }
         self.data_format = 'NHWC'
...
@@ -33,7 +33,8 @@ class TestCumsumOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}

     def set_op_attrs(self):
...
@@ -24,7 +24,7 @@ class TestElementwiseOp(OpMapperTest):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([32, 64], "float32"),
-            'y': self.random([32, 64], "float32")
+            'y': self.random([32, 64], "float32"),
         }

     def set_op_type(self):
@@ -34,11 +34,13 @@ class TestElementwiseOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         y = paddle.static.data(
             name='y',
             shape=self.feed_data['y'].shape,
-            dtype=self.feed_data['y'].dtype)
+            dtype=self.feed_data['y'].dtype,
+        )
         return {'X': [x], 'Y': [y]}

     def set_op_attrs(self):
@@ -95,7 +97,7 @@ class TestFloorDivOpCase1(TestElementwiseOp):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([32, 64], low=1, high=10, dtype='int32'),
-            'y': self.random([32, 64], low=1, high=10, dtype='int32')
+            'y': self.random([32, 64], low=1, high=10, dtype='int32'),
         }

     def set_op_type(self):
@@ -106,7 +108,7 @@ class TestFloorDivOpCase2(TestElementwiseOp):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([32], low=1, high=10, dtype='int64'),
-            'y': self.random([32], low=1, high=10, dtype='int64')
+            'y': self.random([32], low=1, high=10, dtype='int64'),
         }

     def set_op_type(self):
...
@@ -33,7 +33,8 @@ class TestExpandOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}

     def set_op_attrs(self):
...
@@ -33,7 +33,8 @@ class TestExpandV2Op(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}

     def set_op_attrs(self):
...
@@ -39,7 +39,7 @@ class TestFillConstantOp(OpMapperTest):
             "shape": self.shape,
             "value": float(self.value),
             "str_value": self.str_value,
-            "dtype": self.nptype2paddledtype(self.dtype)
+            "dtype": self.nptype2paddledtype(self.dtype),
         }

     def set_op_outputs(self):
@@ -72,7 +72,8 @@ class TestFillConstantByValueTensor(TestFillConstantOp):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {"ValueTensor": [x]}
...
@@ -30,7 +30,8 @@ class TestFlipOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}

     def set_op_attrs(self):
...
@@ -24,7 +24,7 @@ class TestGatherNdOp(OpMapperTest):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([2, 3, 4], 'float32'),
-            'index': np.array([[1]], dtype='int32')
+            'index': np.array([[1]], dtype='int32'),
         }

     def set_op_type(self):
@@ -34,11 +34,13 @@ class TestGatherNdOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         index = paddle.static.data(
             name='index',
             shape=self.feed_data['index'].shape,
-            dtype=self.feed_data['index'].dtype)
+            dtype=self.feed_data['index'].dtype,
+        )
         return {'X': [x], 'Index': [index]}

     def set_op_attrs(self):
@@ -55,7 +57,7 @@ class TestGatherNdCase1(TestGatherNdOp):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([2, 3, 4], 'float32'),
-            'index': np.array([[1, 2, 3]], dtype='int32')
+            'index': np.array([[1, 2, 3]], dtype='int32'),
         }
...
@@ -24,7 +24,7 @@ class TestGatherOp(OpMapperTest):
     def init_input_data(self):
         self.feed_data = {
             'x': self.random([10, 12, 128, 128], 'float32'),
-            'index': self.random([5], 'int32', 0, 10)
+            'index': self.random([5], 'int32', 0, 10),
         }
         self.axis = 0
@@ -35,11 +35,13 @@ class TestGatherOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         index = paddle.static.data(
             name='index',
             shape=self.feed_data['index'].shape,
-            dtype=self.feed_data['index'].dtype)
+            dtype=self.feed_data['index'].dtype,
+        )
         return {'X': [x], 'Index': [index]}

     def set_op_attrs(self):
...
@@ -41,7 +41,7 @@ class TestGaussianRandomOp(OpMapperTest):
             "std": self.std,
             "seed": self.seed,
             "shape": self.shape,
-            "dtype": self.nptype2paddledtype(self.dtype)
+            "dtype": self.nptype2paddledtype(self.dtype),
         }

     def set_op_outputs(self):
...
@@ -37,15 +37,18 @@ class TestLayerNormOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         scale = paddle.static.data(
             name='scale',
             shape=self.feed_data['scale'].shape,
-            dtype=self.feed_data['scale'].dtype)
+            dtype=self.feed_data['scale'].dtype,
+        )
         bias = paddle.static.data(
             name='bias',
             shape=self.feed_data['bias'].shape,
-            dtype=self.feed_data['bias'].dtype)
+            dtype=self.feed_data['bias'].dtype,
+        )
         return {'X': [x], 'Scale': [scale], "Bias": [bias]}

     def set_op_attrs(self):
@@ -55,7 +58,7 @@ class TestLayerNormOp(OpMapperTest):
         return {
             'Y': [str(self.feed_data['x'].dtype)],
             'Mean': [str(self.feed_data['scale'].dtype)],
-            'Variance': [str(self.feed_data['scale'].dtype)]
+            'Variance': [str(self.feed_data['scale'].dtype)],
         }

     def test_check_results(self):
...
@@ -33,7 +33,8 @@ class TestLog1pOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}

     def set_op_attrs(self):
...
@@ -34,11 +34,13 @@ class TestLogicalOp(OpMapperTest):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         y = paddle.static.data(
             name='y',
             shape=self.feed_data['y'].shape,
-            dtype=self.feed_data['y'].dtype)
+            dtype=self.feed_data['y'].dtype,
+        )
         return {'X': [x], 'Y': [y]}

     def set_op_attrs(self):
@@ -74,7 +76,8 @@ class TestLogicalNotOp(TestLogicalOp):
         x = paddle.static.data(
             name='x',
             shape=self.feed_data['x'].shape,
-            dtype=self.feed_data['x'].dtype)
+            dtype=self.feed_data['x'].dtype,
+        )
         return {'X': [x]}
...
...@@ -22,7 +22,7 @@ class TestLookupTableOp(OpMapperTest): ...@@ -22,7 +22,7 @@ class TestLookupTableOp(OpMapperTest):
def init_input_data(self): def init_input_data(self):
self.feed_data = { self.feed_data = {
"w": self.random([10, 3], "float32"), "w": self.random([10, 3], "float32"),
"ids": self.random([5, 1], "int64", 0, 9) "ids": self.random([5, 1], "int64", 0, 9),
} }
def set_op_type(self): def set_op_type(self):
...@@ -32,11 +32,13 @@ class TestLookupTableOp(OpMapperTest): ...@@ -32,11 +32,13 @@ class TestLookupTableOp(OpMapperTest):
w = paddle.static.data( w = paddle.static.data(
name="w", name="w",
shape=self.feed_data["w"].shape, shape=self.feed_data["w"].shape,
dtype=self.feed_data["w"].dtype) dtype=self.feed_data["w"].dtype,
)
ids = paddle.static.data( ids = paddle.static.data(
name="ids", name="ids",
shape=self.feed_data["ids"].shape, shape=self.feed_data["ids"].shape,
dtype=self.feed_data["ids"].dtype) dtype=self.feed_data["ids"].dtype,
)
return {"W": [w], "Ids": [ids]} return {"W": [w], "Ids": [ids]}
def set_op_attrs(self): def set_op_attrs(self):
...@@ -53,7 +55,7 @@ class TestLookupTableOpCase1(TestLookupTableOp): ...@@ -53,7 +55,7 @@ class TestLookupTableOpCase1(TestLookupTableOp):
def init_input_data(self): def init_input_data(self):
self.feed_data = { self.feed_data = {
"w": self.random([32, 64], "float64"), "w": self.random([32, 64], "float64"),
"ids": self.random([10, 1], "int64", 0, 31) "ids": self.random([10, 1], "int64", 0, 31),
} }
def set_op_attrs(self): def set_op_attrs(self):
...@@ -64,7 +66,7 @@ class TestLookupTableV2Op(OpMapperTest): ...@@ -64,7 +66,7 @@ class TestLookupTableV2Op(OpMapperTest):
def init_input_data(self): def init_input_data(self):
self.feed_data = { self.feed_data = {
"w": self.random([10, 3], "float32"), "w": self.random([10, 3], "float32"),
"ids": self.random([5, 2], "int32", 0, 9) "ids": self.random([5, 2], "int32", 0, 9),
} }
def set_op_type(self): def set_op_type(self):
...@@ -74,11 +76,13 @@ class TestLookupTableV2Op(OpMapperTest): ...@@ -74,11 +76,13 @@ class TestLookupTableV2Op(OpMapperTest):
w = paddle.static.data( w = paddle.static.data(
name="w", name="w",
shape=self.feed_data["w"].shape, shape=self.feed_data["w"].shape,
dtype=self.feed_data["w"].dtype) dtype=self.feed_data["w"].dtype,
)
ids = paddle.static.data( ids = paddle.static.data(
name="ids", name="ids",
shape=self.feed_data["ids"].shape, shape=self.feed_data["ids"].shape,
dtype=self.feed_data["ids"].dtype) dtype=self.feed_data["ids"].dtype,
)
return {"W": [w], "Ids": [ids]} return {"W": [w], "Ids": [ids]}
def set_op_attrs(self): def set_op_attrs(self):
...@@ -95,7 +99,7 @@ class TestLookupTableV2OpCase1(TestLookupTableV2Op): ...@@ -95,7 +99,7 @@ class TestLookupTableV2OpCase1(TestLookupTableV2Op):
def init_input_data(self): def init_input_data(self):
self.feed_data = { self.feed_data = {
"w": self.random([32, 64], "float64"), "w": self.random([32, 64], "float64"),
"ids": self.random([10, 3], "int64", 0, 31) "ids": self.random([10, 3], "int64", 0, 31),
} }
def set_op_attrs(self): def set_op_attrs(self):
......
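Every paddle.static.data hunk above is the same mechanical rewrite: when a call overflows the line limit, black gives each argument its own line, appends a trailing comma after the last one, and drops the closing parenthesis to its own line. A minimal runnable sketch of the rule, with a hypothetical describe helper standing in for the Paddle API (illustrative only, not part of this PR):

def describe(name, shape, dtype):
    return f"{name}: shape={shape}, dtype={dtype}"

# Before: the closing parenthesis is glued to the last argument.
desc = describe(
    "x", [32, 64], "float32")

# After black, assuming the arguments no longer fit on one indented
# line: one argument per line, a trailing comma after the last, and
# the closing parenthesis on its own line.  The trailing comma is
# "magic": black keeps the call exploded for as long as it stays.
desc = describe(
    "x",
    [32, 64],
    "float32",
)

print(desc)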
...@@ -36,17 +36,19 @@ class TestMulOp(OpMapperTest): ...@@ -36,17 +36,19 @@ class TestMulOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
y = paddle.static.data( y = paddle.static.data(
name='y', name='y',
shape=self.feed_data['y'].shape, shape=self.feed_data['y'].shape,
dtype=self.feed_data['y'].dtype) dtype=self.feed_data['y'].dtype,
)
return {'X': [x], 'Y': [y]} return {'X': [x], 'Y': [y]}
def set_op_attrs(self): def set_op_attrs(self):
return { return {
"x_num_col_dims": self.x_num_col_dims, "x_num_col_dims": self.x_num_col_dims,
"y_num_col_dims": self.y_num_col_dims "y_num_col_dims": self.y_num_col_dims,
} }
def set_op_outputs(self): def set_op_outputs(self):
......
...@@ -31,7 +31,8 @@ class TestNormOp(OpMapperTest): ...@@ -31,7 +31,8 @@ class TestNormOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
...@@ -40,7 +41,7 @@ class TestNormOp(OpMapperTest): ...@@ -40,7 +41,7 @@ class TestNormOp(OpMapperTest):
def set_op_outputs(self): def set_op_outputs(self):
return { return {
'Out': [str(self.feed_data['x'].dtype)], 'Out': [str(self.feed_data['x'].dtype)],
"Norm": [str(self.feed_data['x'].dtype)] "Norm": [str(self.feed_data['x'].dtype)],
} }
def test_check_results(self): def test_check_results(self):
......
...@@ -32,14 +32,15 @@ class TestOneHotOp(OpMapperTest): ...@@ -32,14 +32,15 @@ class TestOneHotOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
return { return {
"depth": self.depth, "depth": self.depth,
"dtype": self.nptype2paddledtype(self.dtype), "dtype": self.nptype2paddledtype(self.dtype),
"allow_out_of_range": self.allow_out_of_range "allow_out_of_range": self.allow_out_of_range,
} }
def set_op_outputs(self): def set_op_outputs(self):
...@@ -79,14 +80,15 @@ class TestOneHotV2Op(OpMapperTest): ...@@ -79,14 +80,15 @@ class TestOneHotV2Op(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
return { return {
"depth": self.depth, "depth": self.depth,
"dtype": self.nptype2paddledtype(self.dtype), "dtype": self.nptype2paddledtype(self.dtype),
"allow_out_of_range": self.allow_out_of_range "allow_out_of_range": self.allow_out_of_range,
} }
def set_op_outputs(self): def set_op_outputs(self):
......
...@@ -18,8 +18,9 @@ from op_mapper_test import OpMapperTest ...@@ -18,8 +18,9 @@ from op_mapper_test import OpMapperTest
from cinn.common import * from cinn.common import *
@unittest.skipIf(not is_compiled_with_cudnn(), @unittest.skipIf(
"x86 test will be skipped due to timeout.") not is_compiled_with_cudnn(), "x86 test will be skipped due to timeout."
)
class TestPool2dOp(OpMapperTest): class TestPool2dOp(OpMapperTest):
def init_input_data(self): def init_input_data(self):
self.feed_data = {"x": self.random([2, 3, 7, 7], "float64")} self.feed_data = {"x": self.random([2, 3, 7, 7], "float64")}
...@@ -42,7 +43,8 @@ class TestPool2dOp(OpMapperTest): ...@@ -42,7 +43,8 @@ class TestPool2dOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
...@@ -57,7 +59,7 @@ class TestPool2dOp(OpMapperTest): ...@@ -57,7 +59,7 @@ class TestPool2dOp(OpMapperTest):
"ceil_mode": self.ceil_mode, "ceil_mode": self.ceil_mode,
"data_format": self.data_format, "data_format": self.data_format,
"padding_algorithm": self.padding_algorithm, "padding_algorithm": self.padding_algorithm,
"use_cudnn": self.use_cudnn "use_cudnn": self.use_cudnn,
} }
def set_op_outputs(self): def set_op_outputs(self):
......
...@@ -34,11 +34,13 @@ class TestPowOp(OpMapperTest): ...@@ -34,11 +34,13 @@ class TestPowOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
factor = paddle.static.data( factor = paddle.static.data(
name='factor', name='factor',
shape=self.feed_data['factor'].shape, shape=self.feed_data['factor'].shape,
dtype=self.feed_data['factor'].dtype) dtype=self.feed_data['factor'].dtype,
)
return {'X': [x], 'FactorTensor': [factor]} return {'X': [x], 'FactorTensor': [factor]}
def set_op_attrs(self): def set_op_attrs(self):
...@@ -72,7 +74,8 @@ class TestPowOpInFactorAttr(TestPowOp): ...@@ -72,7 +74,8 @@ class TestPowOpInFactorAttr(TestPowOp):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
......
...@@ -41,7 +41,7 @@ class TestRandIntOp(OpMapperTest): ...@@ -41,7 +41,7 @@ class TestRandIntOp(OpMapperTest):
"high": self.max, "high": self.max,
"seed": self.seed, "seed": self.seed,
"shape": self.shape, "shape": self.shape,
"dtype": self.nptype2paddledtype(self.dtype) "dtype": self.nptype2paddledtype(self.dtype),
} }
def set_op_outputs(self): def set_op_outputs(self):
......
...@@ -33,7 +33,8 @@ class TestReduceOp(OpMapperTest): ...@@ -33,7 +33,8 @@ class TestReduceOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
...@@ -124,7 +125,7 @@ class TestReduceOutType(TestReduceOp): ...@@ -124,7 +125,7 @@ class TestReduceOutType(TestReduceOp):
return { return {
"dim": self.dim, "dim": self.dim,
"keep_dim": self.keepdim, "keep_dim": self.keepdim,
"out_dtype": self.nptype2paddledtype("float64") "out_dtype": self.nptype2paddledtype("float64"),
} }
...@@ -133,7 +134,7 @@ class TestReduceUnkOutType(TestReduceOp): ...@@ -133,7 +134,7 @@ class TestReduceUnkOutType(TestReduceOp):
return { return {
"dim": self.dim, "dim": self.dim,
"keep_dim": self.keepdim, "keep_dim": self.keepdim,
"out_dtype": self.nptype2paddledtype("unk") "out_dtype": self.nptype2paddledtype("unk"),
} }
......
...@@ -30,7 +30,8 @@ class TestReverseOp(OpMapperTest): ...@@ -30,7 +30,8 @@ class TestReverseOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
......
...@@ -35,7 +35,8 @@ class TestRollOp(OpMapperTest): ...@@ -35,7 +35,8 @@ class TestRollOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
......
...@@ -34,14 +34,15 @@ class TestScaleOp(OpMapperTest): ...@@ -34,14 +34,15 @@ class TestScaleOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
return { return {
"scale": self.scale, "scale": self.scale,
"bias": self.bias, "bias": self.bias,
"bias_after_scale": self.bias_after_scale "bias_after_scale": self.bias_after_scale,
} }
def set_op_outputs(self): def set_op_outputs(self):
...@@ -71,7 +72,7 @@ class TestScaleWithScaleTensor(TestScaleOp): ...@@ -71,7 +72,7 @@ class TestScaleWithScaleTensor(TestScaleOp):
def init_input_data(self): def init_input_data(self):
self.feed_data = { self.feed_data = {
'x': self.random([32, 64], "float32"), 'x': self.random([32, 64], "float32"),
"scale": self.random([1], "float32", 2.0, 10.0) "scale": self.random([1], "float32", 2.0, 10.0),
} }
self.bias = 2.0 self.bias = 2.0
self.bias_after_scale = True self.bias_after_scale = True
...@@ -80,11 +81,13 @@ class TestScaleWithScaleTensor(TestScaleOp): ...@@ -80,11 +81,13 @@ class TestScaleWithScaleTensor(TestScaleOp):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
scale = paddle.static.data( scale = paddle.static.data(
name='scale', name='scale',
shape=self.feed_data['scale'].shape, shape=self.feed_data['scale'].shape,
dtype=self.feed_data['scale'].dtype) dtype=self.feed_data['scale'].dtype,
)
return {'X': [x], "ScaleTensor": [scale]} return {'X': [x], "ScaleTensor": [scale]}
def set_op_attrs(self): def set_op_attrs(self):
...@@ -95,7 +98,7 @@ class TestScaleWithScaleTensorCase1(TestScaleWithScaleTensor): ...@@ -95,7 +98,7 @@ class TestScaleWithScaleTensorCase1(TestScaleWithScaleTensor):
def init_input_data(self): def init_input_data(self):
self.feed_data = { self.feed_data = {
'x': self.random([32, 64], "float32"), 'x': self.random([32, 64], "float32"),
"scale": self.random([1], "float32", 2.0, 10.0) "scale": self.random([1], "float32", 2.0, 10.0),
} }
self.bias = 0.0 self.bias = 0.0
self.bias_after_scale = True self.bias_after_scale = True
...@@ -105,7 +108,7 @@ class TestScaleWithScaleTensorCase2(TestScaleWithScaleTensor): ...@@ -105,7 +108,7 @@ class TestScaleWithScaleTensorCase2(TestScaleWithScaleTensor):
def init_input_data(self): def init_input_data(self):
self.feed_data = { self.feed_data = {
'x': self.random([32, 64], "int32"), 'x': self.random([32, 64], "int32"),
"scale": self.random([1], "float32", 2.0, 10.0) "scale": self.random([1], "float32", 2.0, 10.0),
} }
self.bias = 0.0 self.bias = 0.0
self.bias_after_scale = True self.bias_after_scale = True
......
...@@ -27,13 +27,10 @@ class TestScatterOp(OpMapperTest): ...@@ -27,13 +27,10 @@ class TestScatterOp(OpMapperTest):
dim1 = 10 dim1 = 10
x_data = self.random([dim0, dim1], "float32") x_data = self.random([dim0, dim1], "float32")
ids_data = np.random.randint( ids_data = np.random.randint(
0, dim0, [random.randint(1, 5)], dtype=np.int32) 0, dim0, [random.randint(1, 5)], dtype=np.int32
)
updates_data = self.random([len(ids_data), dim1], "float32") updates_data = self.random([len(ids_data), dim1], "float32")
self.feed_data = { self.feed_data = {'x': x_data, 'ids': ids_data, 'updates': updates_data}
'x': x_data,
'ids': ids_data,
'updates': updates_data
}
def set_op_type(self): def set_op_type(self):
return "scatter" return "scatter"
...@@ -42,15 +39,18 @@ class TestScatterOp(OpMapperTest): ...@@ -42,15 +39,18 @@ class TestScatterOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
ids = paddle.static.data( ids = paddle.static.data(
name='ids', name='ids',
shape=self.feed_data['ids'].shape, shape=self.feed_data['ids'].shape,
dtype=self.feed_data['ids'].dtype) dtype=self.feed_data['ids'].dtype,
)
updates = paddle.static.data( updates = paddle.static.data(
name='updates', name='updates',
shape=self.feed_data['updates'].shape, shape=self.feed_data['updates'].shape,
dtype=self.feed_data['updates'].dtype) dtype=self.feed_data['updates'].dtype,
)
return {'X': [x], 'Ids': [ids], 'Updates': [updates]} return {'X': [x], 'Ids': [ids], 'Updates': [updates]}
def set_op_attrs(self): def set_op_attrs(self):
...@@ -69,15 +69,12 @@ class TestScatterOpOverWrite(TestScatterOp): ...@@ -69,15 +69,12 @@ class TestScatterOpOverWrite(TestScatterOp):
dim1 = 10 dim1 = 10
x_data = self.random([dim0, dim1], "float32") x_data = self.random([dim0, dim1], "float32")
ids_data = np.random.randint( ids_data = np.random.randint(
0, dim0, [random.randint(1, 10)], dtype=np.int32) 0, dim0, [random.randint(1, 10)], dtype=np.int32
)
# remove duplicate indices, because paddle's behavior is nondeterministic when an index appears more than once # remove duplicate indices, because paddle's behavior is nondeterministic when an index appears more than once
ids_data = np.unique(ids_data) ids_data = np.unique(ids_data)
updates_data = self.random([len(ids_data), dim1], "float32") updates_data = self.random([len(ids_data), dim1], "float32")
self.feed_data = { self.feed_data = {'x': x_data, 'ids': ids_data, 'updates': updates_data}
'x': x_data,
'ids': ids_data,
'updates': updates_data
}
def set_op_attrs(self): def set_op_attrs(self):
return {'overwrite': True} return {'overwrite': True}
......
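The np.unique call in the scatter hunks above does real work rather than formatting: with overwrite semantics, scatter writes whole rows, and when the same index appears twice the surviving row depends on which write lands last, so there is no single expected value to assert. A runnable NumPy sketch of the ambiguity (illustrative, not from the PR):

import numpy as np

x = np.zeros((3, 2), dtype=np.float32)
ids = np.array([1, 1], dtype=np.int32)  # duplicate index
updates = np.array([[1.0, 1.0], [2.0, 2.0]], dtype=np.float32)

# Sequential writes: the later row wins.  A parallel scatter may
# apply them in either order, which is why the test calls
# np.unique(ids_data) before building its expected output.
for i, u in zip(ids, updates):
    x[i] = u
print(x[1])  # [2. 2.] here, but [1. 1.] is equally legal for paddle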
...@@ -33,7 +33,8 @@ class TestSignOp(OpMapperTest): ...@@ -33,7 +33,8 @@ class TestSignOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
......
...@@ -35,7 +35,8 @@ class TestSplitOp(OpMapperTest): ...@@ -35,7 +35,8 @@ class TestSplitOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
......
...@@ -33,7 +33,8 @@ class TestSqueezeOp(OpMapperTest): ...@@ -33,7 +33,8 @@ class TestSqueezeOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
...@@ -42,7 +43,7 @@ class TestSqueezeOp(OpMapperTest): ...@@ -42,7 +43,7 @@ class TestSqueezeOp(OpMapperTest):
def set_op_outputs(self): def set_op_outputs(self):
return { return {
'Out': [str(self.feed_data['x'].dtype)], 'Out': [str(self.feed_data['x'].dtype)],
"XShape": [str(self.feed_data['x'].dtype)] "XShape": [str(self.feed_data['x'].dtype)],
} }
def skip_check_outputs(self): def skip_check_outputs(self):
......
...@@ -35,7 +35,8 @@ class TestStackOp(OpMapperTest): ...@@ -35,7 +35,8 @@ class TestStackOp(OpMapperTest):
paddle.static.data( paddle.static.data(
name=var_name, name=var_name,
shape=self.feed_data[var_name].shape, shape=self.feed_data[var_name].shape,
dtype=self.feed_data[var_name].dtype) dtype=self.feed_data[var_name].dtype,
)
for var_name in self.feed_data.keys() for var_name in self.feed_data.keys()
] ]
return {'X': x} return {'X': x}
......
...@@ -38,7 +38,8 @@ class TestStridedSliceOp(OpMapperTest): ...@@ -38,7 +38,8 @@ class TestStridedSliceOp(OpMapperTest):
inputs = paddle.static.data( inputs = paddle.static.data(
name='inputs', name='inputs',
shape=self.feed_data['inputs'].shape, shape=self.feed_data['inputs'].shape,
dtype=self.feed_data['inputs'].dtype) dtype=self.feed_data['inputs'].dtype,
)
return {'Input': [inputs]} return {'Input': [inputs]}
def set_op_attrs(self): def set_op_attrs(self):
...@@ -47,7 +48,7 @@ class TestStridedSliceOp(OpMapperTest): ...@@ -47,7 +48,7 @@ class TestStridedSliceOp(OpMapperTest):
"starts": self.starts, "starts": self.starts,
"ends": self.ends, "ends": self.ends,
"strides": self.strides, "strides": self.strides,
"infer_flags": self.infer_flags "infer_flags": self.infer_flags,
} }
def set_op_outputs(self): def set_op_outputs(self):
......
...@@ -43,25 +43,31 @@ class TestTakeAlongAxisOp(OpMapperTest): ...@@ -43,25 +43,31 @@ class TestTakeAlongAxisOp(OpMapperTest):
def set_op_inputs(self): def set_op_inputs(self):
broadcast_shape = infer_broadcast_shape( broadcast_shape = infer_broadcast_shape(
self.feed_data['x'], self.feed_data['index'], self.axis) self.feed_data['x'], self.feed_data['index'], self.axis
)
if not broadcast_shape: if not broadcast_shape:
broadcast_shape = self.feed_data['index'].shape broadcast_shape = self.feed_data['index'].shape
self.feed_data['index'] = np.broadcast_to(self.feed_data['index'], self.feed_data['index'] = np.broadcast_to(
broadcast_shape).copy() self.feed_data['index'], broadcast_shape
).copy()
broadcast_shape_list = list(broadcast_shape) broadcast_shape_list = list(broadcast_shape)
broadcast_shape_list[self.axis] = list( broadcast_shape_list[self.axis] = list(self.feed_data['x'].shape)[
self.feed_data['x'].shape)[self.axis] self.axis
]
broadcast_shape = tuple(broadcast_shape_list) broadcast_shape = tuple(broadcast_shape_list)
self.feed_data['x'] = np.broadcast_to(self.feed_data['x'], self.feed_data['x'] = np.broadcast_to(
broadcast_shape).copy() self.feed_data['x'], broadcast_shape
).copy()
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
index = paddle.static.data( index = paddle.static.data(
name='index', name='index',
shape=self.feed_data['index'].shape, shape=self.feed_data['index'].shape,
dtype=self.feed_data['index'].dtype) dtype=self.feed_data['index'].dtype,
)
return {'Input': [x], 'Index': [index]} return {'Input': [x], 'Index': [index]}
def set_op_attrs(self): def set_op_attrs(self):
......
...@@ -34,7 +34,8 @@ class TestTileOp(OpMapperTest): ...@@ -34,7 +34,8 @@ class TestTileOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
......
...@@ -37,7 +37,8 @@ class TestTranspose2Op(OpMapperTest): ...@@ -37,7 +37,8 @@ class TestTranspose2Op(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
...@@ -46,7 +47,7 @@ class TestTranspose2Op(OpMapperTest): ...@@ -46,7 +47,7 @@ class TestTranspose2Op(OpMapperTest):
def set_op_outputs(self): def set_op_outputs(self):
return { return {
'Out': [str(self.feed_data['x'].dtype)], 'Out': [str(self.feed_data['x'].dtype)],
'XShape': [str(self.feed_data['x'].dtype)] 'XShape': [str(self.feed_data['x'].dtype)],
} }
def skip_check_outputs(self): def skip_check_outputs(self):
......
...@@ -23,7 +23,7 @@ class TestTriangularSolveOp(OpMapperTest): ...@@ -23,7 +23,7 @@ class TestTriangularSolveOp(OpMapperTest):
def init_input_data(self): def init_input_data(self):
self.feed_data = { self.feed_data = {
'x': self.random([32, 32], "float32"), 'x': self.random([32, 32], "float32"),
'y': self.random([32, 128], "float32") 'y': self.random([32, 128], "float32"),
} }
def set_op_type(self): def set_op_type(self):
...@@ -33,11 +33,13 @@ class TestTriangularSolveOp(OpMapperTest): ...@@ -33,11 +33,13 @@ class TestTriangularSolveOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
y = paddle.static.data( y = paddle.static.data(
name='y', name='y',
shape=self.feed_data['y'].shape, shape=self.feed_data['y'].shape,
dtype=self.feed_data['y'].dtype) dtype=self.feed_data['y'].dtype,
)
return {'X': [x], 'Y': [y]} return {'X': [x], 'Y': [y]}
def set_op_attrs(self): def set_op_attrs(self):
......
...@@ -31,7 +31,8 @@ class TestUnaryOp(OpMapperTest): ...@@ -31,7 +31,8 @@ class TestUnaryOp(OpMapperTest):
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
return {'X': [x]} return {'X': [x]}
def set_op_attrs(self): def set_op_attrs(self):
......
...@@ -47,7 +47,7 @@ class TestUniformRandomOp(OpMapperTest): ...@@ -47,7 +47,7 @@ class TestUniformRandomOp(OpMapperTest):
"dtype": self.nptype2paddledtype(self.dtype), "dtype": self.nptype2paddledtype(self.dtype),
"diag_num": self.diag_num, "diag_num": self.diag_num,
"diag_step": self.diag_step, "diag_step": self.diag_step,
"diag_val": self.diag_val "diag_val": self.diag_val,
} }
def set_op_outputs(self): def set_op_outputs(self):
......
...@@ -35,15 +35,18 @@ class TestWhereOp(OpMapperTest): ...@@ -35,15 +35,18 @@ class TestWhereOp(OpMapperTest):
condition = paddle.static.data( condition = paddle.static.data(
name='condition', name='condition',
shape=self.feed_data['condition'].shape, shape=self.feed_data['condition'].shape,
dtype=self.feed_data['condition'].dtype) dtype=self.feed_data['condition'].dtype,
)
x = paddle.static.data( x = paddle.static.data(
name='x', name='x',
shape=self.feed_data['x'].shape, shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype) dtype=self.feed_data['x'].dtype,
)
y = paddle.static.data( y = paddle.static.data(
name='y', name='y',
shape=self.feed_data['y'].shape, shape=self.feed_data['y'].shape,
dtype=self.feed_data['y'].dtype) dtype=self.feed_data['y'].dtype,
)
return {'Condition': [condition], 'X': [x], "Y": [y]} return {'Condition': [condition], 'X': [x], "Y": [y]}
def set_op_attrs(self): def set_op_attrs(self):
......
...@@ -95,17 +95,13 @@ class OpTest(unittest.TestCase): ...@@ -95,17 +95,13 @@ class OpTest(unittest.TestCase):
def build_cinn_program(self, target): def build_cinn_program(self, target):
raise Exception("Not implemented.") raise Exception("Not implemented.")
def get_cinn_output(self, def get_cinn_output(
prog, self, prog, target, inputs, feed_data, outputs, passes=[], scope=None
target, ):
inputs,
feed_data,
outputs,
passes=[],
scope=None):
fetch_ids = {str(out) for out in outputs} fetch_ids = {str(out) for out in outputs}
result = prog.build_and_get_output( result = prog.build_and_get_output(
target, inputs, feed_data, outputs, passes=passes, scope=scope) target, inputs, feed_data, outputs, passes=passes, scope=scope
)
outs_and_grads = [] outs_and_grads = []
for res in result: for res in result:
outs_and_grads.append(res.numpy(target)) outs_and_grads.append(res.numpy(target))
...@@ -127,33 +123,49 @@ class OpTest(unittest.TestCase): ...@@ -127,33 +123,49 @@ class OpTest(unittest.TestCase):
logger.debug("============ After Decomposer Pass ============") logger.debug("============ After Decomposer Pass ============")
print_program(prog) print_program(prog)
def check_outputs_and_grads(self, def check_outputs_and_grads(
max_relative_error=1e-5, self,
max_absolute_error=1e-6, max_relative_error=1e-5,
all_equal=False, max_absolute_error=1e-6,
equal_nan=False): all_equal=False,
equal_nan=False,
):
self.build_paddle_program(self.target) self.build_paddle_program(self.target)
self.build_cinn_program(self.target) self.build_cinn_program(self.target)
logger.debug("============ Check Outputs ============") logger.debug("============ Check Outputs ============")
self.check_results(self.paddle_outputs, self.cinn_outputs, self.check_results(
max_relative_error, max_absolute_error, all_equal, self.paddle_outputs,
equal_nan, "Outputs") self.cinn_outputs,
max_relative_error,
max_absolute_error,
all_equal,
equal_nan,
"Outputs",
)
if len(self.cinn_grads) != 0: if len(self.cinn_grads) != 0:
logger.debug("============ Check Grads ============") logger.debug("============ Check Grads ============")
self.check_results(self.paddle_grads, self.cinn_grads, self.check_results(
max_relative_error, max_absolute_error, self.paddle_grads,
all_equal, equal_nan, "Grads") self.cinn_grads,
max_relative_error,
def check_results(self, max_absolute_error,
expect_res, all_equal,
actual_res, equal_nan,
max_relative_error, "Grads",
max_absolute_error, )
all_equal=False,
equal_nan=False, def check_results(
name="Outputs"): self,
expect_res,
actual_res,
max_relative_error,
max_absolute_error,
all_equal=False,
equal_nan=False,
name="Outputs",
):
def _compute_error_message(output_id, expect, actual): def _compute_error_message(output_id, expect, actual):
absolute_diff = np.abs(expect - actual).flatten() absolute_diff = np.abs(expect - actual).flatten()
relative_diff = absolute_diff / np.abs(expect).flatten() relative_diff = absolute_diff / np.abs(expect).flatten()
...@@ -166,26 +178,40 @@ class OpTest(unittest.TestCase): ...@@ -166,26 +178,40 @@ class OpTest(unittest.TestCase):
max_relative_diff = relative_diff[i] max_relative_diff = relative_diff[i]
if absolute_diff[i] > max_absolute_diff: if absolute_diff[i] > max_absolute_diff:
max_absolute_diff = absolute_diff[i] max_absolute_diff = absolute_diff[i]
if relative_diff[i] > max_relative_error or absolute_diff[ if (
i] > max_absolute_error: relative_diff[i] > max_relative_error
or absolute_diff[i] > max_absolute_error
):
num_diffs = num_diffs + 1 num_diffs = num_diffs + 1
offset = i if offset == -1 else offset offset = i if offset == -1 else offset
# The following print can be used to debug. # The following print can be used to debug.
# print("i=%d, %e vs %e, relative_diff=%e, absolute_diff=%e" % (i, expect.flatten()[i], actual.flatten()[i], relative_diff[i], absolute_diff[i])) # print("i=%d, %e vs %e, relative_diff=%e, absolute_diff=%e" % (i, expect.flatten()[i], actual.flatten()[i], relative_diff[i], absolute_diff[i]))
error_message = "[%s] The %d-th output: total %d different results, offset=%d, shape=%s, %e vs %e. Maximum diff of the whole array: maximum_relative_diff=%e, maximum_absolute_diff=%e." % ( error_message = (
self._get_device(), output_id, num_diffs, offset, "[%s] The %d-th output: total %d different results, offset=%d, shape=%s, %e vs %e. Maximum diff of the whole array: maximum_relative_diff=%e, maximum_absolute_diff=%e."
str(expect.shape), expect.flatten()[offset], % (
actual.flatten()[offset], max_relative_diff, max_absolute_diff) self._get_device(),
output_id,
num_diffs,
offset,
str(expect.shape),
expect.flatten()[offset],
actual.flatten()[offset],
max_relative_diff,
max_absolute_diff,
)
)
return error_message return error_message
def _check_error_message(output_id, expect, actual): def _check_error_message(output_id, expect, actual):
expect_flatten = expect.flatten() expect_flatten = expect.flatten()
actual_flatten = actual.flatten() actual_flatten = actual.flatten()
self.assertEqual( self.assertEqual(
len(expect_flatten), len(actual_flatten), len(expect_flatten),
"[{}] The {}-th output size different, which expect shape is {} but actual is {}." len(actual_flatten),
.format(self._get_device(), output_id, expect.shape, "[{}] The {}-th output size different, which expect shape is {} but actual is {}.".format(
actual.shape)) self._get_device(), output_id, expect.shape, actual.shape
),
)
num_diffs = 0 num_diffs = 0
offset = -1 offset = -1
for i in range(len(expect_flatten)): for i in range(len(expect_flatten)):
...@@ -194,8 +220,13 @@ class OpTest(unittest.TestCase): ...@@ -194,8 +220,13 @@ class OpTest(unittest.TestCase):
offset = i if offset == -1 else offset offset = i if offset == -1 else offset
error_message = "[{}] The {}-th output: total {} different results, the first different result's offset={}, where expect value is {} but actual is {}.".format( error_message = "[{}] The {}-th output: total {} different results, the first different result's offset={}, where expect value is {} but actual is {}.".format(
self._get_device(), output_id, num_diffs, offset, self._get_device(),
expect_flatten[offset], actual_flatten[offset]) output_id,
num_diffs,
offset,
expect_flatten[offset],
actual_flatten[offset],
)
return error_message return error_message
self.assertEqual(len(expect_res), len(actual_res)) self.assertEqual(len(expect_res), len(actual_res))
...@@ -218,24 +249,25 @@ class OpTest(unittest.TestCase): ...@@ -218,24 +249,25 @@ class OpTest(unittest.TestCase):
self.assertEqual( self.assertEqual(
expect.dtype, expect.dtype,
actual.dtype, actual.dtype,
msg= msg="[{}] The {}-th output dtype differs: expected dtype is {} but actual is {}.".format(
"[{}] The {}-th output dtype differs: expected dtype is {} but actual is {}." self._get_device(), i, expect.dtype, actual.dtype
.format(self._get_device(), i, expect.dtype, actual.dtype)) ),
)
# NOTE: Paddle's 0D Tensor will be changed to 1D when calling tensor.numpy(), # NOTE: Paddle's 0D Tensor will be changed to 1D when calling tensor.numpy(),
# only check non-0D Tensor's shape here. 0D-Tensor's shape will be verified by `test_zero_dim_tensor.py` # only check non-0D Tensor's shape here. 0D-Tensor's shape will be verified by `test_zero_dim_tensor.py`
if len(expect.shape) != 0 and len(actual.shape) != 0: if len(expect.shape) != 0 and len(actual.shape) != 0:
self.assertEqual( self.assertEqual(
expect.shape, expect.shape,
actual.shape, actual.shape,
msg= msg="[{}] The {}-th output shape differs: expected shape is {} but actual is {}.".format(
"[{}] The {}-th output shape differs: expected shape is {} but actual is {}." self._get_device(), i, expect.shape, actual.shape
.format(self._get_device(), i, expect.shape, actual.shape)) ),
)
should_all_equal = all_equal or (actual.dtype in [ should_all_equal = all_equal or (
np.dtype('bool'), actual.dtype
np.dtype('int32'), in [np.dtype('bool'), np.dtype('int32'), np.dtype('int64')]
np.dtype('int64') )
])
if expect.dtype == np.uint16: if expect.dtype == np.uint16:
expect_float = convert_uint16_to_float(expect) expect_float = convert_uint16_to_float(expect)
...@@ -250,17 +282,24 @@ class OpTest(unittest.TestCase): ...@@ -250,17 +282,24 @@ class OpTest(unittest.TestCase):
actual, actual,
atol=max_absolute_error, atol=max_absolute_error,
rtol=max_relative_error, rtol=max_relative_error,
equal_nan=equal_nan) equal_nan=equal_nan,
)
# _compute_error_message reports which values exceed the absolute or relative tolerance # _compute_error_message reports which values exceed the absolute or relative tolerance
error_message = "np.allclose(expect, actual, atol={}, rtol={}) checks succeed!".format( error_message = (
max_absolute_error, max_relative_error "np.allclose(expect, actual, atol={}, rtol={}) checks succeed!".format(
) if is_allclose else _compute_error_message( max_absolute_error, max_relative_error
i, expect, actual) )
if is_allclose
else _compute_error_message(i, expect, actual)
)
else: else:
is_allclose = np.all(expect == actual) is_allclose = np.all(expect == actual)
# _check_error_message checks which values are not equal # _check_error_message checks which values are not equal
error_message = "(expect == actual) checks succeed!" if is_allclose else _check_error_message( error_message = (
i, expect, actual) "(expect == actual) checks succeed!"
if is_allclose
else _check_error_message(i, expect, actual)
)
error_message = "[Check " + name + "] " + error_message error_message = "[Check " + name + "] " + error_message
...@@ -285,7 +324,7 @@ class OpTest(unittest.TestCase): ...@@ -285,7 +324,7 @@ class OpTest(unittest.TestCase):
# "uint16": UInt(16), # "uint16": UInt(16),
"uint32": UInt(32), "uint32": UInt(32),
"uint64": UInt(64), "uint64": UInt(64),
"bool": Bool() "bool": Bool(),
} }
assert str(dtype) in switch_map, str(dtype) + " not support in CINN" assert str(dtype) in switch_map, str(dtype) + " not support in CINN"
return switch_map[str(dtype)] return switch_map[str(dtype)]
...@@ -302,12 +341,19 @@ class OpTest(unittest.TestCase): ...@@ -302,12 +341,19 @@ class OpTest(unittest.TestCase):
return np.random.uniform(low, high, shape).astype(dtype) return np.random.uniform(low, high, shape).astype(dtype)
elif dtype == "bfloat16": elif dtype == "bfloat16":
return convert_float_to_uint16( return convert_float_to_uint16(
np.random.uniform(low, high, shape).astype("float32")) np.random.uniform(low, high, shape).astype("float32")
)
elif dtype == "bool": elif dtype == "bool":
return np.random.choice(a=[False, True], size=shape).astype(dtype) return np.random.choice(a=[False, True], size=shape).astype(dtype)
elif dtype in [ elif dtype in [
"uint8", "uint16", "uint32", "uint64", "int8", "int16", "uint8",
"int32", "int64" "uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]: ]:
return np.random.randint(low, high, shape).astype(dtype) return np.random.randint(low, high, shape).astype(dtype)
else: else:
......
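Beyond argument explosion, two more black rules recur through the op_test.py hunks above: an over-long if condition gets wrapped in parentheses with the boolean operator leading each continuation line, and an over-long conditional expression gets parenthesized with its if/else clauses on separate lines. A small runnable sketch of both shapes (the values are placeholders, not from the PR):

rel, abs_, rel_tol, abs_tol = 1e-3, 1e-7, 1e-5, 1e-6

# Long boolean condition: parentheses added, operator leads the line.
if (
    rel > rel_tol
    or abs_ > abs_tol
):
    print("diff detected")

# Long conditional expression: parenthesized, one clause per line.
case = {}
max_relative_error = (
    case["max_relative_error"]
    if "max_relative_error" in case
    else 1e-5
)
print(max_relative_error)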
...@@ -26,11 +26,12 @@ parser.add_argument( ...@@ -26,11 +26,12 @@ parser.add_argument(
"--case", "--case",
type=str, type=str,
help="Which case you want to test, default -1 for all cases.", help="Which case you want to test, default -1 for all cases.",
default=None) default=None,
)
args = parser.parse_args() args = parser.parse_args()
class TestCaseHelper(): class TestCaseHelper:
""" """
Helper class for constructing test cases. Helper class for constructing test cases.
""" """
...@@ -69,7 +70,10 @@ class TestCaseHelper(): ...@@ -69,7 +70,10 @@ class TestCaseHelper():
assert isinstance(self.attrs, list) assert isinstance(self.attrs, list)
self.all_cases = [] self.all_cases = []
all_lists = [ all_lists = [
self.inputs, self.dtypes, self.attrs, *self.custom_attrs_list self.inputs,
self.dtypes,
self.attrs,
*self.custom_attrs_list,
] ]
filtered_lists = filter(lambda x: len(x) > 0, all_lists) filtered_lists = filter(lambda x: len(x) > 0, all_lists)
for case in itertools.product(*filtered_lists): for case in itertools.product(*filtered_lists):
...@@ -87,13 +91,21 @@ class TestCaseHelper(): ...@@ -87,13 +91,21 @@ class TestCaseHelper():
no = int(re.search(r'\d+$', test_name).group(0)) no = int(re.search(r'\d+$', test_name).group(0))
assert 0 <= no and no < len(self.all_cases) assert 0 <= no and no < len(self.all_cases)
self.all_classes.append( self.all_classes.append(
type(f'{self.__class__.__name__}.{self.class_name}{no}', type(
(self.cls, ), {"case": self.all_cases[no]})) f'{self.__class__.__name__}.{self.class_name}{no}',
(self.cls,),
{"case": self.all_cases[no]},
)
)
else: else:
for i, case in enumerate(self.all_cases): for i, case in enumerate(self.all_cases):
self.all_classes.append( self.all_classes.append(
type(f'{self.__class__.__name__}.{self.class_name}{i}', type(
(self.cls, ), {"case": case})) f'{self.__class__.__name__}.{self.class_name}{i}',
(self.cls,),
{"case": case},
)
)
def run(self): def run(self):
""" """
......
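The op_test_helper.py hunks above only reformat the machinery, so it is worth spelling out what that machinery does: TestCaseHelper crosses inputs, dtypes, and attrs with itertools.product and stamps out one unittest class per combination via the three-argument type() call, attaching each combination as a class attribute named case. A stripped-down runnable sketch of the same pattern (class and variable names here are illustrative):

import itertools
import unittest

class _Base(unittest.TestCase):
    case = None  # each generated subclass overrides this

    def test_case(self):
        self.assertIn("x_shape", self.case)

inputs = [{"x_shape": [1]}, {"x_shape": [32, 64]}]
dtypes = [{"x_dtype": "float32"}]

all_cases = []
for combo in itertools.product(inputs, dtypes):
    merged = {}
    for part in combo:
        merged.update(part)
    all_cases.append(merged)

# type(name, bases, namespace) builds a class object at runtime.
generated = [
    type(f"TestDemoOp{i}", (_Base,), {"case": case})
    for i, case in enumerate(all_cases)
]
print([cls.__name__ for cls in generated])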
...@@ -22,8 +22,9 @@ from cinn.frontend import * ...@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAbsOp(OpTest): class TestAbsOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -34,7 +35,8 @@ class TestAbsOp(OpTest): ...@@ -34,7 +35,8 @@ class TestAbsOp(OpTest):
shape=self.case["x_shape"], shape=self.case["x_shape"],
dtype=self.case["x_dtype"], dtype=self.case["x_dtype"],
low=-100, low=-100,
high=100) high=100,
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=True) x = paddle.to_tensor(self.x_np, stop_gradient=True)
...@@ -45,8 +47,10 @@ class TestAbsOp(OpTest): ...@@ -45,8 +47,10 @@ class TestAbsOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("identity") builder = NetBuilder("identity")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
out = builder.abs(x) out = builder.abs(x)
prog = builder.build() prog = builder.build()
...@@ -63,24 +67,34 @@ class TestAbsOpShape(TestCaseHelper): ...@@ -63,24 +67,34 @@ class TestAbsOpShape(TestCaseHelper):
def init_attrs(self): def init_attrs(self):
self.class_name = "TestAbsOpShape" self.class_name = "TestAbsOpShape"
self.cls = TestAbsOp self.cls = TestAbsOp
self.inputs = [{ self.inputs = [
"x_shape": [1], {
}, { "x_shape": [1],
"x_shape": [1024], },
}, { {
"x_shape": [1, 2048], "x_shape": [1024],
}, { },
"x_shape": [1, 1, 1], {
}, { "x_shape": [1, 2048],
"x_shape": [32, 64], },
}, { {
"x_shape": [16, 8, 4, 2], "x_shape": [1, 1, 1],
}, { },
"x_shape": [16, 8, 4, 2, 1], {
}] "x_shape": [32, 64],
self.dtypes = [{ },
"x_dtype": "float32", {
}] "x_shape": [16, 8, 4, 2],
},
{
"x_shape": [16, 8, 4, 2, 1],
},
]
self.dtypes = [
{
"x_dtype": "float32",
}
]
self.attrs = [] self.attrs = []
...@@ -88,21 +102,26 @@ class TestAbsOpDtype(TestCaseHelper): ...@@ -88,21 +102,26 @@ class TestAbsOpDtype(TestCaseHelper):
def init_attrs(self): def init_attrs(self):
self.class_name = "TestAbsOpDtype" self.class_name = "TestAbsOpDtype"
self.cls = TestAbsOp self.cls = TestAbsOp
self.inputs = [{ self.inputs = [
"x_shape": [32, 64], {
}] "x_shape": [32, 64],
self.dtypes = [{ }
"x_dtype": "int32", ]
}, { self.dtypes = [
"x_dtype": "int64", {
}, { "x_dtype": "int32",
"x_dtype": "float16", },
"max_relative_error": 1e-3 {
}, { "x_dtype": "int64",
"x_dtype": "float32", },
}, { {"x_dtype": "float16", "max_relative_error": 1e-3},
"x_dtype": "float64", {
}] "x_dtype": "float32",
},
{
"x_dtype": "float64",
},
]
self.attrs = [] self.attrs = []
......
...@@ -22,8 +22,9 @@ from cinn.frontend import * ...@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAcosOp(OpTest): class TestAcosOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -34,7 +35,8 @@ class TestAcosOp(OpTest): ...@@ -34,7 +35,8 @@ class TestAcosOp(OpTest):
shape=self.case["x_shape"], shape=self.case["x_shape"],
dtype=self.case["x_dtype"], dtype=self.case["x_dtype"],
low=-1, low=-1,
high=1) high=1,
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False) x = paddle.to_tensor(self.x_np, stop_gradient=False)
...@@ -45,8 +47,10 @@ class TestAcosOp(OpTest): ...@@ -45,8 +47,10 @@ class TestAcosOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("acos") builder = NetBuilder("acos")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
out = builder.acos(x) out = builder.acos(x)
...@@ -56,8 +60,11 @@ class TestAcosOp(OpTest): ...@@ -56,8 +60,11 @@ class TestAcosOp(OpTest):
self.cinn_outputs = res self.cinn_outputs = res
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -66,11 +73,12 @@ class TestAcosCase1(TestCaseHelper): ...@@ -66,11 +73,12 @@ class TestAcosCase1(TestCaseHelper):
self.class_name = "TestAcosCase1" self.class_name = "TestAcosCase1"
self.cls = TestAcosOp self.cls = TestAcosOp
self.inputs = [{"x_shape": [512, 256]}] self.inputs = [{"x_shape": [512, 256]}]
self.dtypes = [{ self.dtypes = [
"x_dtype": "float32" {"x_dtype": "float32"},
}, { {
"x_dtype": "float64", "x_dtype": "float64",
}] },
]
self.attrs = [] self.attrs = []
...@@ -78,23 +86,16 @@ class TestAcosCase2(TestCaseHelper): ...@@ -78,23 +86,16 @@ class TestAcosCase2(TestCaseHelper):
def init_attrs(self): def init_attrs(self):
self.class_name = "TestAcosCase2" self.class_name = "TestAcosCase2"
self.cls = TestAcosOp self.cls = TestAcosOp
self.inputs = [{ self.inputs = [
"x_shape": [1] {"x_shape": [1]},
}, { {"x_shape": [1024]},
"x_shape": [1024] {"x_shape": [512, 256]},
}, { {"x_shape": [128, 64, 32]},
"x_shape": [512, 256] {"x_shape": [128, 2048, 32]},
}, { {"x_shape": [16, 8, 4, 2]},
"x_shape": [128, 64, 32] {"x_shape": [1, 1, 1, 1]},
}, { {"x_shape": [16, 8, 4, 2, 1]},
"x_shape": [128, 2048, 32] ]
}, {
"x_shape": [16, 8, 4, 2]
}, {
"x_shape": [1, 1, 1, 1]
}, {
"x_shape": [16, 8, 4, 2, 1]
}]
self.dtypes = [{"x_dtype": "float32"}] self.dtypes = [{"x_dtype": "float32"}]
self.attrs = [] self.attrs = []
......
...@@ -22,8 +22,9 @@ from cinn.frontend import * ...@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAcoshOp(OpTest): class TestAcoshOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -34,7 +35,8 @@ class TestAcoshOp(OpTest): ...@@ -34,7 +35,8 @@ class TestAcoshOp(OpTest):
low=2, low=2,
high=100, high=100,
shape=self.case["x_shape"], shape=self.case["x_shape"],
dtype=self.case["x_dtype"]) dtype=self.case["x_dtype"],
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False) x = paddle.to_tensor(self.x_np, stop_gradient=False)
...@@ -45,8 +47,10 @@ class TestAcoshOp(OpTest): ...@@ -45,8 +47,10 @@ class TestAcoshOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("acosh") builder = NetBuilder("acosh")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
out = builder.acosh(x) out = builder.acosh(x)
...@@ -56,8 +60,11 @@ class TestAcoshOp(OpTest): ...@@ -56,8 +60,11 @@ class TestAcoshOp(OpTest):
self.cinn_outputs = res self.cinn_outputs = res
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -66,11 +73,12 @@ class TestAcoshCase1(TestCaseHelper): ...@@ -66,11 +73,12 @@ class TestAcoshCase1(TestCaseHelper):
self.class_name = "TestAcoshCase1" self.class_name = "TestAcoshCase1"
self.cls = TestAcoshOp self.cls = TestAcoshOp
self.inputs = [{"x_shape": [512, 256]}] self.inputs = [{"x_shape": [512, 256]}]
self.dtypes = [{ self.dtypes = [
"x_dtype": "float32" {"x_dtype": "float32"},
}, { {
"x_dtype": "float64", "x_dtype": "float64",
}] },
]
self.attrs = [] self.attrs = []
...@@ -78,23 +86,16 @@ class TestAcoshCase2(TestCaseHelper): ...@@ -78,23 +86,16 @@ class TestAcoshCase2(TestCaseHelper):
def init_attrs(self): def init_attrs(self):
self.class_name = "TestAcoshCase2" self.class_name = "TestAcoshCase2"
self.cls = TestAcoshOp self.cls = TestAcoshOp
self.inputs = [{ self.inputs = [
"x_shape": [1] {"x_shape": [1]},
}, { {"x_shape": [1024]},
"x_shape": [1024] {"x_shape": [512, 256]},
}, { {"x_shape": [128, 64, 32]},
"x_shape": [512, 256] {"x_shape": [128, 2048, 32]},
}, { {"x_shape": [16, 8, 4, 2]},
"x_shape": [128, 64, 32] {"x_shape": [1, 1, 1, 1]},
}, { {"x_shape": [16, 8, 4, 2, 1]},
"x_shape": [128, 2048, 32] ]
}, {
"x_shape": [16, 8, 4, 2]
}, {
"x_shape": [1, 1, 1, 1]
}, {
"x_shape": [16, 8, 4, 2, 1]
}]
self.dtypes = [{"x_dtype": "float32"}] self.dtypes = [{"x_dtype": "float32"}]
self.attrs = [] self.attrs = []
......
...@@ -20,8 +20,9 @@ from op_test import OpTest, OpTestTool ...@@ -20,8 +20,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestElementwiseAddOp(OpTest): class TestElementwiseAddOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -32,14 +33,17 @@ class TestElementwiseAddOp(OpTest): ...@@ -32,14 +33,17 @@ class TestElementwiseAddOp(OpTest):
shape=self.case["x_shape"], shape=self.case["x_shape"],
dtype=self.case["x_dtype"], dtype=self.case["x_dtype"],
low=-10, low=-10,
high=10) high=10,
)
self.y_np = self.random( self.y_np = self.random(
shape=self.case["y_shape"], shape=self.case["y_shape"],
dtype=self.case["y_dtype"], dtype=self.case["y_dtype"],
low=-10, low=-10,
high=10) high=10,
)
self.dout_np = self.random( self.dout_np = self.random(
self.case["dout_shape"], dtype=self.case["dout_dtype"]) self.case["dout_shape"], dtype=self.case["dout_dtype"]
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False) x = paddle.to_tensor(self.x_np, stop_gradient=False)
...@@ -48,49 +52,69 @@ class TestElementwiseAddOp(OpTest): ...@@ -48,49 +52,69 @@ class TestElementwiseAddOp(OpTest):
def get_unsqueeze_axis(x_rank, y_rank, axis): def get_unsqueeze_axis(x_rank, y_rank, axis):
self.assertTrue( self.assertTrue(
x_rank >= y_rank, x_rank >= y_rank,
"The rank of x should be greater or equal to that of y.") "The rank of x should be greater or equal to that of y.",
)
axis = axis if axis >= 0 else x_rank - y_rank axis = axis if axis >= 0 else x_rank - y_rank
unsqueeze_axis = np.arange(0, axis).tolist() + np.arange( unsqueeze_axis = (
axis + y_rank, x_rank).tolist() np.arange(0, axis).tolist()
+ np.arange(axis + y_rank, x_rank).tolist()
)
return unsqueeze_axis return unsqueeze_axis
unsqueeze_axis = get_unsqueeze_axis( unsqueeze_axis = get_unsqueeze_axis(
len(x.shape), len(y.shape), self.case["axis"]) len(x.shape), len(y.shape), self.case["axis"]
y_t = paddle.unsqueeze( )
y, axis=unsqueeze_axis) if len(unsqueeze_axis) > 0 else y y_t = (
paddle.unsqueeze(y, axis=unsqueeze_axis)
if len(unsqueeze_axis) > 0
else y
)
out = paddle.add(x, y_t) out = paddle.add(x, y_t)
self.paddle_outputs = [out] self.paddle_outputs = [out]
self.paddle_grads = self.get_paddle_grads([out], [x, y], self.paddle_grads = self.get_paddle_grads([out], [x, y], [self.dout_np])
[self.dout_np])
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("add") builder = NetBuilder("add")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
y = builder.create_input( y = builder.create_input(
self.nptype2cinntype(self.case["y_dtype"]), self.case["y_shape"], self.nptype2cinntype(self.case["y_dtype"]),
"y") self.case["y_shape"],
"y",
)
out = builder.add(x, y, axis=self.case["axis"]) out = builder.add(x, y, axis=self.case["axis"])
dout = builder.create_input( dout = builder.create_input(
self.nptype2cinntype(self.case["dout_dtype"]), self.nptype2cinntype(self.case["dout_dtype"]),
self.case["dout_shape"], "dout") self.case["dout_shape"],
"dout",
)
x_grad, y_grad = builder.elementwise_add_grad( x_grad, y_grad = builder.elementwise_add_grad(
dout, x, y, axis=self.case["axis"]) dout, x, y, axis=self.case["axis"]
)
prog = builder.build() prog = builder.build()
res = self.get_cinn_output(prog, target, [x, y, dout], res = self.get_cinn_output(
[self.x_np, self.y_np, self.dout_np], prog,
[out, x_grad, y_grad]) target,
[x, y, dout],
[self.x_np, self.y_np, self.dout_np],
[out, x_grad, y_grad],
)
self.cinn_outputs = [res[0]] self.cinn_outputs = [res[0]]
self.cinn_grads = [res[1], res[2]] self.cinn_grads = [res[1], res[2]]
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
......
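The elementwise_add hunks above reformat get_unsqueeze_axis without changing its logic, which is easy to lose in the diff: given x_rank >= y_rank and a broadcast axis, it lists the dimensions that must be inserted into y so its rank matches x before the add. A worked NumPy equivalent (illustrative, not from the PR):

import numpy as np

def get_unsqueeze_axis(x_rank, y_rank, axis):
    # A negative axis means "align y with the trailing dims of x".
    axis = axis if axis >= 0 else x_rank - y_rank
    return list(range(0, axis)) + list(range(axis + y_rank, x_rank))

x = np.zeros((2, 3, 4, 5))
y = np.ones((3, 4))

# axis=1: y lines up with dims 1 and 2 of x, so dims 0 and 3 must be
# inserted -> unsqueeze_axis == [0, 3] and y_t has shape (1, 3, 4, 1).
ux = get_unsqueeze_axis(x.ndim, y.ndim, 1)
y_t = np.expand_dims(y, tuple(ux))
print((x + y_t).shape)  # (2, 3, 4, 5)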
...@@ -21,8 +21,9 @@ from op_test import OpTest, OpTestTool ...@@ -21,8 +21,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestArangeOp(OpTest): class TestArangeOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -34,18 +35,26 @@ class TestArangeOp(OpTest): ...@@ -34,18 +35,26 @@ class TestArangeOp(OpTest):
"start": self.case["start"], "start": self.case["start"],
"end": self.case["end"], "end": self.case["end"],
"step": self.case["step"], "step": self.case["step"],
"dtype": self.case["dtype"] "dtype": self.case["dtype"],
} }
def build_paddle_program(self, target): def build_paddle_program(self, target):
out = paddle.arange(self.inputs["start"], self.inputs["end"], out = paddle.arange(
self.inputs["step"], self.inputs["dtype"]) self.inputs["start"],
self.inputs["end"],
self.inputs["step"],
self.inputs["dtype"],
)
self.paddle_outputs = [out] self.paddle_outputs = [out]
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("arange") builder = NetBuilder("arange")
out = builder.arange(self.inputs["start"], self.inputs["end"], out = builder.arange(
self.inputs["step"], self.inputs["dtype"]) self.inputs["start"],
self.inputs["end"],
self.inputs["step"],
self.inputs["dtype"],
)
prog = builder.build() prog = builder.build()
res = self.get_cinn_output(prog, target, [], [], [out]) res = self.get_cinn_output(prog, target, [], [], [out])
...@@ -141,9 +150,7 @@ class TestArangeOpShapeAndAttr(TestCaseHelper): ...@@ -141,9 +150,7 @@ class TestArangeOpShapeAndAttr(TestCaseHelper):
}, },
] ]
self.dtypes = [ self.dtypes = [
{ {"dtype": "float32"},
"dtype": "float32"
},
] ]
self.attrs = [] self.attrs = []
...@@ -170,18 +177,10 @@ class TestArangeOpDtype(TestCaseHelper): ...@@ -170,18 +177,10 @@ class TestArangeOpDtype(TestCaseHelper):
}, },
] ]
self.dtypes = [ self.dtypes = [
{ {"dtype": "int32"},
"dtype": "int32" {"dtype": "int64"},
}, {"dtype": "float32"},
{ {"dtype": "float64"},
"dtype": "int64"
},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
] ]
self.attrs = [] self.attrs = []
......
...@@ -24,18 +24,21 @@ from cinn.frontend import * ...@@ -24,18 +24,21 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestArgSortOp(OpTest): class TestArgSortOp(OpTest):
def setUp(self): def setUp(self):
self.init_case() self.init_case()
def init_case(self): def init_case(self):
self.inputs = { self.inputs = {
"x1": np.random.random([ "x1": np.random.random(
2, [
4, 2,
]).astype("float32") 4,
]
).astype("float32")
} }
self.axis = 1 self.axis = 1
self.descending = False self.descending = False
...@@ -51,8 +54,9 @@ class TestArgSortOp(OpTest): ...@@ -51,8 +54,9 @@ class TestArgSortOp(OpTest):
x1 = builder.create_input(Float(32), self.inputs["x1"].shape, "x1") x1 = builder.create_input(Float(32), self.inputs["x1"].shape, "x1")
out = builder.argsort(x1, self.axis, not self.descending) out = builder.argsort(x1, self.axis, not self.descending)
prog = builder.build() prog = builder.build()
forward_res = self.get_cinn_output(prog, target, [x1], forward_res = self.get_cinn_output(
[self.inputs["x1"]], out) prog, target, [x1], [self.inputs["x1"]], out
)
self.cinn_outputs = np.array([forward_res[0]]).astype("int64") self.cinn_outputs = np.array([forward_res[0]]).astype("int64")
...@@ -63,10 +67,12 @@ class TestArgSortOp(OpTest): ...@@ -63,10 +67,12 @@ class TestArgSortOp(OpTest):
class TestArgSortCase1(TestArgSortOp): class TestArgSortCase1(TestArgSortOp):
def init_case(self): def init_case(self):
self.inputs = { self.inputs = {
"x1": np.random.random([ "x1": np.random.random(
2, [
4, 2,
]).astype("float32") 4,
]
).astype("float32")
} }
self.axis = 0 self.axis = 0
self.descending = False self.descending = False
...@@ -75,10 +81,12 @@ class TestArgSortCase1(TestArgSortOp): ...@@ -75,10 +81,12 @@ class TestArgSortCase1(TestArgSortOp):
class TestArgSortCase2(TestArgSortOp): class TestArgSortCase2(TestArgSortOp):
def init_case(self): def init_case(self):
self.inputs = { self.inputs = {
"x1": np.random.random([ "x1": np.random.random(
2, [
4, 2,
]).astype("float32") 4,
]
).astype("float32")
} }
self.axis = 0 self.axis = 0
self.descending = True self.descending = True
...@@ -87,10 +95,12 @@ class TestArgSortCase2(TestArgSortOp): ...@@ -87,10 +95,12 @@ class TestArgSortCase2(TestArgSortOp):
class TestArgSortCase3(TestArgSortOp): class TestArgSortCase3(TestArgSortOp):
def init_case(self): def init_case(self):
self.inputs = { self.inputs = {
"x1": np.random.random([ "x1": np.random.random(
2, [
4, 2,
]).astype("float32") 4,
]
).astype("float32")
} }
self.axis = 1 self.axis = 1
self.descending = True self.descending = True
...@@ -99,10 +109,12 @@ class TestArgSortCase3(TestArgSortOp): ...@@ -99,10 +109,12 @@ class TestArgSortCase3(TestArgSortOp):
class TestArgSortCase4(TestArgSortOp): class TestArgSortCase4(TestArgSortOp):
def init_case(self): def init_case(self):
self.inputs = { self.inputs = {
"x1": np.random.random([ "x1": np.random.random(
2, [
4, 2,
]).astype("float32") 4,
]
).astype("float32")
} }
self.axis = -1 self.axis = -1
self.descending = True self.descending = True
......
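One behavioral detail sits inside the argsort hunks above: Paddle expresses order with descending=..., while the builder call receives not self.descending, i.e. an ascending-style flag. A NumPy sketch of why the negation preserves the convention (the builder parameter's actual name is not visible in this diff, so the ascending interpretation is an assumption):

import numpy as np

x = np.array([[3.0, 1.0, 2.0]], dtype=np.float32)

descending = True
ascend = not descending      # what the test hands to builder.argsort

idx = np.argsort(x, axis=1)  # ascending indices: [[1 2 0]]
if not ascend:
    idx = idx[:, ::-1]       # descending indices: [[0 2 1]]
print(idx)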
...@@ -23,8 +23,9 @@ from cinn.frontend import * ...@@ -23,8 +23,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAsinOp(OpTest): class TestAsinOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -35,7 +36,8 @@ class TestAsinOp(OpTest): ...@@ -35,7 +36,8 @@ class TestAsinOp(OpTest):
shape=self.case["x_shape"], shape=self.case["x_shape"],
dtype=self.case["x_dtype"], dtype=self.case["x_dtype"],
low=-1.0, low=-1.0,
high=1.0) high=1.0,
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=True) x = paddle.to_tensor(self.x_np, stop_gradient=True)
...@@ -45,8 +47,10 @@ class TestAsinOp(OpTest): ...@@ -45,8 +47,10 @@ class TestAsinOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("unary_elementwise_test") builder = NetBuilder("unary_elementwise_test")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
out = builder.asin(x) out = builder.asin(x)
prog = builder.build() prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.x_np], [out]) res = self.get_cinn_output(prog, target, [x], [self.x_np], [out])
...@@ -61,24 +65,34 @@ class TestAsinOpShape(TestCaseHelper): ...@@ -61,24 +65,34 @@ class TestAsinOpShape(TestCaseHelper):
def init_attrs(self): def init_attrs(self):
self.class_name = "TestAsinOpShape" self.class_name = "TestAsinOpShape"
self.cls = TestAsinOp self.cls = TestAsinOp
self.inputs = [{ self.inputs = [
"x_shape": [1], {
}, { "x_shape": [1],
"x_shape": [1024], },
}, { {
"x_shape": [1, 2048], "x_shape": [1024],
}, { },
"x_shape": [1, 1, 1], {
}, { "x_shape": [1, 2048],
"x_shape": [32, 64], },
}, { {
"x_shape": [16, 8, 4, 2], "x_shape": [1, 1, 1],
}, { },
"x_shape": [16, 8, 4, 2, 1], {
}] "x_shape": [32, 64],
self.dtypes = [{ },
"x_dtype": "float32", {
}] "x_shape": [16, 8, 4, 2],
},
{
"x_shape": [16, 8, 4, 2, 1],
},
]
self.dtypes = [
{
"x_dtype": "float32",
}
]
self.attrs = [] self.attrs = []
...@@ -86,14 +100,13 @@ class TestAsinOpDtype(TestCaseHelper): ...@@ -86,14 +100,13 @@ class TestAsinOpDtype(TestCaseHelper):
def init_attrs(self): def init_attrs(self):
self.class_name = "TestAsinOpDtype" self.class_name = "TestAsinOpDtype"
self.cls = TestAsinOp self.cls = TestAsinOp
self.inputs = [{ self.inputs = [
"x_shape": [32, 64],
}]
self.dtypes = [
{ {
"x_dtype": "float16", "x_shape": [32, 64],
"max_relative_error": 1e-3 }
}, ]
self.dtypes = [
{"x_dtype": "float16", "max_relative_error": 1e-3},
{ {
"x_dtype": "float32", "x_dtype": "float32",
}, },
......
...@@ -22,8 +22,9 @@ from cinn.frontend import * ...@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAsinhOp(OpTest): class TestAsinhOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -31,7 +32,8 @@ class TestAsinhOp(OpTest): ...@@ -31,7 +32,8 @@ class TestAsinhOp(OpTest):
def prepare_inputs(self): def prepare_inputs(self):
self.x_np = self.random( self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"]) shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False) x = paddle.to_tensor(self.x_np, stop_gradient=False)
...@@ -42,8 +44,10 @@ class TestAsinhOp(OpTest): ...@@ -42,8 +44,10 @@ class TestAsinhOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("asinh") builder = NetBuilder("asinh")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
out = builder.asinh(x) out = builder.asinh(x)
...@@ -53,8 +57,11 @@ class TestAsinhOp(OpTest): ...@@ -53,8 +57,11 @@ class TestAsinhOp(OpTest):
self.cinn_outputs = res self.cinn_outputs = res
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -63,11 +70,12 @@ class TestAsinhCase1(TestCaseHelper): ...@@ -63,11 +70,12 @@ class TestAsinhCase1(TestCaseHelper):
self.class_name = "TestAsinhCase1" self.class_name = "TestAsinhCase1"
self.cls = TestAsinhOp self.cls = TestAsinhOp
self.inputs = [{"x_shape": [512, 256]}] self.inputs = [{"x_shape": [512, 256]}]
self.dtypes = [{ self.dtypes = [
"x_dtype": "float32" {"x_dtype": "float32"},
}, { {
"x_dtype": "float64", "x_dtype": "float64",
}] },
]
self.attrs = [] self.attrs = []
...@@ -75,23 +83,16 @@ class TestAsinhCase2(TestCaseHelper): ...@@ -75,23 +83,16 @@ class TestAsinhCase2(TestCaseHelper):
def init_attrs(self): def init_attrs(self):
self.class_name = "TestAsinhCase2" self.class_name = "TestAsinhCase2"
self.cls = TestAsinhOp self.cls = TestAsinhOp
self.inputs = [{ self.inputs = [
"x_shape": [1] {"x_shape": [1]},
}, { {"x_shape": [1024]},
"x_shape": [1024] {"x_shape": [512, 256]},
}, { {"x_shape": [128, 64, 32]},
"x_shape": [512, 256] {"x_shape": [128, 2048, 32]},
}, { {"x_shape": [16, 8, 4, 2]},
"x_shape": [128, 64, 32] {"x_shape": [1, 1, 1, 1]},
}, { {"x_shape": [16, 8, 4, 2, 1]},
"x_shape": [128, 2048, 32] ]
}, {
"x_shape": [16, 8, 4, 2]
}, {
"x_shape": [1, 1, 1, 1]
}, {
"x_shape": [16, 8, 4, 2, 1]
}]
self.dtypes = [{"x_dtype": "float32"}] self.dtypes = [{"x_dtype": "float32"}]
self.attrs = [] self.attrs = []
......
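
The parenthesized conditional black produces for max_relative_error above (and in the atan/atanh/atan2 tests that follow) is behavior-preserving; a shorter, equivalent spelling would be dict.get, shown here only as an illustration, while the tests themselves keep the ternary:

case = {"x_dtype": "float16", "max_relative_error": 1e-3}

# Equivalent to: case["max_relative_error"] if "max_relative_error" in case else 1e-5
max_relative_error = case.get("max_relative_error", 1e-5)
assert max_relative_error == 1e-3
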
...@@ -19,8 +19,9 @@ from cinn.frontend import * ...@@ -19,8 +19,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAtan2Op(OpTest): class TestAtan2Op(OpTest):
def setUp(self): def setUp(self):
# print(f"\n{self.__class__.__name__}: {self.case}") # print(f"\n{self.__class__.__name__}: {self.case}")
...@@ -31,12 +32,14 @@ class TestAtan2Op(OpTest): ...@@ -31,12 +32,14 @@ class TestAtan2Op(OpTest):
shape=self.case["x_shape"], shape=self.case["x_shape"],
dtype=self.case["x_dtype"], dtype=self.case["x_dtype"],
low=self.case["x_low"], low=self.case["x_low"],
high=self.case["x_high"]) high=self.case["x_high"],
)
self.y_np = self.random( self.y_np = self.random(
shape=self.case["y_shape"], shape=self.case["y_shape"],
dtype=self.case["y_dtype"], dtype=self.case["y_dtype"],
low=self.case["y_low"], low=self.case["y_low"],
high=self.case["y_high"]) high=self.case["y_high"],
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False) x = paddle.to_tensor(self.x_np, stop_gradient=False)
...@@ -47,20 +50,28 @@ class TestAtan2Op(OpTest): ...@@ -47,20 +50,28 @@ class TestAtan2Op(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("atan2") builder = NetBuilder("atan2")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
y = builder.create_input( y = builder.create_input(
self.nptype2cinntype(self.case["y_dtype"]), self.case["y_shape"], self.nptype2cinntype(self.case["y_dtype"]),
"y") self.case["y_shape"],
"y",
)
out = builder.atan2(x, y) out = builder.atan2(x, y)
prog = builder.build() prog = builder.build()
res = self.get_cinn_output(prog, target, [x, y], res = self.get_cinn_output(
[self.x_np, self.y_np], [out]) prog, target, [x, y], [self.x_np, self.y_np], [out]
)
self.cinn_outputs = res self.cinn_outputs = res
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -100,12 +111,14 @@ class TestAtan2OpShapes(TestCaseHelper): ...@@ -100,12 +111,14 @@ class TestAtan2OpShapes(TestCaseHelper):
"y_dtype": "float32", "y_dtype": "float32",
}, },
] ]
self.attrs = [{ self.attrs = [
"x_low": -10, {
"x_high": 10, "x_low": -10,
"y_low": -10, "x_high": 10,
"y_high": 10, "y_low": -10,
}] "y_high": 10,
}
]
class TestAtan2OpDtypes(TestAtan2OpShapes): class TestAtan2OpDtypes(TestAtan2OpShapes):
......
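
The atan2 test draws x and y from ranges that cross zero because atan2, unlike atan of the plain ratio, uses the signs of both arguments to resolve the quadrant. numpy's arctan2 follows the same convention and makes the difference easy to see:

import numpy as np

# Same ratio x / y, different quadrants: atan collapses them, atan2 does not.
print(np.arctan(1.0 / -1.0))   # -0.785... (-pi/4)
print(np.arctan2(1.0, -1.0))   #  2.356... (3*pi/4)
print(np.arctan2(-1.0, 1.0))   # -0.785... (-pi/4)
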
...@@ -22,8 +22,9 @@ from cinn.frontend import * ...@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAtanOp(OpTest): class TestAtanOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -31,7 +32,8 @@ class TestAtanOp(OpTest): ...@@ -31,7 +32,8 @@ class TestAtanOp(OpTest):
def prepare_inputs(self): def prepare_inputs(self):
self.x_np = self.random( self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"]) shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False) x = paddle.to_tensor(self.x_np, stop_gradient=False)
...@@ -42,8 +44,10 @@ class TestAtanOp(OpTest): ...@@ -42,8 +44,10 @@ class TestAtanOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("atan") builder = NetBuilder("atan")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
out = builder.atan(x) out = builder.atan(x)
...@@ -53,8 +57,11 @@ class TestAtanOp(OpTest): ...@@ -53,8 +57,11 @@ class TestAtanOp(OpTest):
self.cinn_outputs = res self.cinn_outputs = res
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -63,11 +70,12 @@ class TestAtanCase1(TestCaseHelper): ...@@ -63,11 +70,12 @@ class TestAtanCase1(TestCaseHelper):
self.class_name = "TestAtanCase1" self.class_name = "TestAtanCase1"
self.cls = TestAtanOp self.cls = TestAtanOp
self.inputs = [{"x_shape": [512, 256]}] self.inputs = [{"x_shape": [512, 256]}]
self.dtypes = [{ self.dtypes = [
"x_dtype": "float32" {"x_dtype": "float32"},
}, { {
"x_dtype": "float64", "x_dtype": "float64",
}] },
]
self.attrs = [] self.attrs = []
...@@ -75,23 +83,16 @@ class TestAtanCase2(TestCaseHelper): ...@@ -75,23 +83,16 @@ class TestAtanCase2(TestCaseHelper):
def init_attrs(self): def init_attrs(self):
self.class_name = "TestAtanCase2" self.class_name = "TestAtanCase2"
self.cls = TestAtanOp self.cls = TestAtanOp
self.inputs = [{ self.inputs = [
"x_shape": [1] {"x_shape": [1]},
}, { {"x_shape": [1024]},
"x_shape": [1024] {"x_shape": [512, 256]},
}, { {"x_shape": [128, 64, 32]},
"x_shape": [512, 256] {"x_shape": [128, 2048, 32]},
}, { {"x_shape": [16, 8, 4, 2]},
"x_shape": [128, 64, 32] {"x_shape": [1, 1, 1, 1]},
}, { {"x_shape": [16, 8, 4, 2, 1]},
"x_shape": [128, 2048, 32] ]
}, {
"x_shape": [16, 8, 4, 2]
}, {
"x_shape": [1, 1, 1, 1]
}, {
"x_shape": [16, 8, 4, 2, 1]
}]
self.dtypes = [{"x_dtype": "float32"}] self.dtypes = [{"x_dtype": "float32"}]
self.attrs = [] self.attrs = []
......
...@@ -22,8 +22,9 @@ from cinn.frontend import * ...@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAtanhOp(OpTest): class TestAtanhOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -31,7 +32,8 @@ class TestAtanhOp(OpTest): ...@@ -31,7 +32,8 @@ class TestAtanhOp(OpTest):
def prepare_inputs(self): def prepare_inputs(self):
self.x_np = self.random( self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"]) shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False) x = paddle.to_tensor(self.x_np, stop_gradient=False)
...@@ -42,8 +44,10 @@ class TestAtanhOp(OpTest): ...@@ -42,8 +44,10 @@ class TestAtanhOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("atanh") builder = NetBuilder("atanh")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
out = builder.atanh(x) out = builder.atanh(x)
...@@ -53,8 +57,11 @@ class TestAtanhOp(OpTest): ...@@ -53,8 +57,11 @@ class TestAtanhOp(OpTest):
self.cinn_outputs = res self.cinn_outputs = res
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -63,11 +70,12 @@ class TestAtanhCase1(TestCaseHelper): ...@@ -63,11 +70,12 @@ class TestAtanhCase1(TestCaseHelper):
self.class_name = "TestAtanhCase1" self.class_name = "TestAtanhCase1"
self.cls = TestAtanhOp self.cls = TestAtanhOp
self.inputs = [{"x_shape": [512, 256]}] self.inputs = [{"x_shape": [512, 256]}]
self.dtypes = [{ self.dtypes = [
"x_dtype": "float32" {"x_dtype": "float32"},
}, { {
"x_dtype": "float64", "x_dtype": "float64",
}] },
]
self.attrs = [] self.attrs = []
...@@ -75,23 +83,16 @@ class TestAtanhCase2(TestCaseHelper): ...@@ -75,23 +83,16 @@ class TestAtanhCase2(TestCaseHelper):
def init_attrs(self): def init_attrs(self):
self.class_name = "TestAtanhCase2" self.class_name = "TestAtanhCase2"
self.cls = TestAtanhOp self.cls = TestAtanhOp
self.inputs = [{ self.inputs = [
"x_shape": [1] {"x_shape": [1]},
}, { {"x_shape": [1024]},
"x_shape": [1024] {"x_shape": [512, 256]},
}, { {"x_shape": [128, 64, 32]},
"x_shape": [512, 256] {"x_shape": [128, 2048, 32]},
}, { {"x_shape": [16, 8, 4, 2]},
"x_shape": [128, 64, 32] {"x_shape": [1, 1, 1, 1]},
}, { {"x_shape": [16, 8, 4, 2, 1]},
"x_shape": [128, 2048, 32] ]
}, {
"x_shape": [16, 8, 4, 2]
}, {
"x_shape": [1, 1, 1, 1]
}, {
"x_shape": [16, 8, 4, 2, 1]
}]
self.dtypes = [{"x_dtype": "float32"}] self.dtypes = [{"x_dtype": "float32"}]
self.attrs = [] self.attrs = []
......
...@@ -24,8 +24,9 @@ from cinn.frontend import * ...@@ -24,8 +24,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBatchNormTrainOp(OpTest): class TestBatchNormTrainOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -33,12 +34,14 @@ class TestBatchNormTrainOp(OpTest): ...@@ -33,12 +34,14 @@ class TestBatchNormTrainOp(OpTest):
def prepare_inputs(self): def prepare_inputs(self):
self.x_np = self.random( self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"]) shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
batch_norm = paddle.nn.BatchNorm( batch_norm = paddle.nn.BatchNorm(
self.case["x_shape"][1], act=None, is_test=False) self.case["x_shape"][1], act=None, is_test=False
)
out = batch_norm(x) out = batch_norm(x)
self.paddle_outputs = [out] self.paddle_outputs = [out]
...@@ -48,27 +51,37 @@ class TestBatchNormTrainOp(OpTest): ...@@ -48,27 +51,37 @@ class TestBatchNormTrainOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("batch_norm") builder = NetBuilder("batch_norm")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
scale = builder.fill_constant([self.case["x_shape"][1]], 1.0, 'scale', "x",
'float32') )
bias = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'bias', scale = builder.fill_constant(
'float32') [self.case["x_shape"][1]], 1.0, 'scale', 'float32'
mean = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'mean', )
'float32') bias = builder.fill_constant(
variance = builder.fill_constant([self.case["x_shape"][1]], 1.0, [self.case["x_shape"][1]], 0.0, 'bias', 'float32'
'variance', 'float32') )
mean = builder.fill_constant(
[self.case["x_shape"][1]], 0.0, 'mean', 'float32'
)
variance = builder.fill_constant(
[self.case["x_shape"][1]], 1.0, 'variance', 'float32'
)
out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False) out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False)
prog = builder.build() prog = builder.build()
forward_res = self.get_cinn_output( forward_res = self.get_cinn_output(
prog, target, [x], [self.x_np], out, passes=[]) prog, target, [x], [self.x_np], out, passes=[]
)
self.cinn_outputs = [forward_res[0]] self.cinn_outputs = [forward_res[0]]
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -89,24 +102,16 @@ class TestBatchNormTrainOpAll(TestCaseHelper): ...@@ -89,24 +102,16 @@ class TestBatchNormTrainOpAll(TestCaseHelper):
}, },
] ]
self.dtypes = [ self.dtypes = [
{ {"x_dtype": "float16", "max_relative_error": 1e-3},
"x_dtype": "float16", {"x_dtype": "float32", "max_relative_error": 1e-5},
"max_relative_error": 1e-3 {"x_dtype": "bfloat16", "max_relative_error": 1e-2},
},
{
"x_dtype": "float32",
"max_relative_error": 1e-5
},
{
"x_dtype": "bfloat16",
"max_relative_error": 1e-2
},
] ]
self.attrs = [] self.attrs = []
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBatchNormBackwardOp(OpTest): class TestBatchNormBackwardOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -114,14 +119,17 @@ class TestBatchNormBackwardOp(OpTest): ...@@ -114,14 +119,17 @@ class TestBatchNormBackwardOp(OpTest):
def prepare_inputs(self): def prepare_inputs(self):
self.x_np = self.random( self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"]) shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
self.y_np = self.random( self.y_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"]) shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False) x = paddle.to_tensor(self.x_np, stop_gradient=False)
batch_norm = paddle.nn.BatchNorm( batch_norm = paddle.nn.BatchNorm(
self.case["x_shape"][1], act=None, is_test=False) self.case["x_shape"][1], act=None, is_test=False
)
out = batch_norm(x) out = batch_norm(x)
self.paddle_outputs = [out] self.paddle_outputs = [out]
...@@ -132,52 +140,72 @@ class TestBatchNormBackwardOp(OpTest): ...@@ -132,52 +140,72 @@ class TestBatchNormBackwardOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("batch_norm") builder = NetBuilder("batch_norm")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
scale = builder.fill_constant([self.case["x_shape"][1]], 1.0, 'scale', "x",
'float32') )
bias = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'bias', scale = builder.fill_constant(
'float32') [self.case["x_shape"][1]], 1.0, 'scale', 'float32'
mean = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'mean', )
'float32') bias = builder.fill_constant(
variance = builder.fill_constant([self.case["x_shape"][1]], 1.0, [self.case["x_shape"][1]], 0.0, 'bias', 'float32'
'variance', 'float32') )
mean = builder.fill_constant(
[self.case["x_shape"][1]], 0.0, 'mean', 'float32'
)
variance = builder.fill_constant(
[self.case["x_shape"][1]], 1.0, 'variance', 'float32'
)
out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False) out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False)
prog = builder.build() prog = builder.build()
forward_res = self.get_cinn_output( forward_res = self.get_cinn_output(
prog, target, [x], [self.x_np], out, passes=[]) prog, target, [x], [self.x_np], out, passes=[]
)
self.cinn_outputs = [forward_res[0]] self.cinn_outputs = [forward_res[0]]
builder_grad = NetBuilder("batch_norm_grad") builder_grad = NetBuilder("batch_norm_grad")
dout = builder_grad.create_input( dout = builder_grad.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"dout") self.case["x_shape"],
"dout",
)
x_g = builder_grad.create_input( x_g = builder_grad.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x_g") self.case["x_shape"],
scale_g = builder_grad.fill_constant(scale.shape(), 1.0, 'scale_g', "x_g",
'float32') )
scale_g = builder_grad.fill_constant(
scale.shape(), 1.0, 'scale_g', 'float32'
)
save_mean = builder_grad.create_input( save_mean = builder_grad.create_input(
self.nptype2cinntype('float32'), out[1].shape(), "save_mean") self.nptype2cinntype('float32'), out[1].shape(), "save_mean"
)
save_variance = builder_grad.create_input( save_variance = builder_grad.create_input(
self.nptype2cinntype('float32'), out[2].shape(), "save_variance") self.nptype2cinntype('float32'), out[2].shape(), "save_variance"
)
out_grad = builder_grad.batch_norm_grad(dout, x_g, scale_g, save_mean, out_grad = builder_grad.batch_norm_grad(
save_variance) dout, x_g, scale_g, save_mean, save_variance
)
prog = builder_grad.build() prog = builder_grad.build()
backward_res = self.get_cinn_output( backward_res = self.get_cinn_output(
prog, prog,
target, [dout, x_g, save_mean, save_variance], target,
[dout, x_g, save_mean, save_variance],
[self.y_np, self.x_np, forward_res[1], forward_res[2]], [self.y_np, self.x_np, forward_res[1], forward_res[2]],
out_grad, out_grad,
passes=[]) passes=[],
)
self.cinn_grads = [backward_res[0]] self.cinn_grads = [backward_res[0]]
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -198,20 +226,15 @@ class TestBatchNormBackwardOpAll(TestCaseHelper): ...@@ -198,20 +226,15 @@ class TestBatchNormBackwardOpAll(TestCaseHelper):
}, },
] ]
self.dtypes = [ self.dtypes = [
{ {"x_dtype": "float16", "max_relative_error": 1e-3},
"x_dtype": "float16", {"x_dtype": "float32", "max_relative_error": 1e-5},
"max_relative_error": 1e-3
},
{
"x_dtype": "float32",
"max_relative_error": 1e-5
},
] ]
self.attrs = [] self.attrs = []
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBatchNormInferOp(OpTest): class TestBatchNormInferOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -219,12 +242,14 @@ class TestBatchNormInferOp(OpTest): ...@@ -219,12 +242,14 @@ class TestBatchNormInferOp(OpTest):
def prepare_inputs(self): def prepare_inputs(self):
self.x_np = self.random( self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"]) shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
batch_norm = paddle.nn.BatchNorm( batch_norm = paddle.nn.BatchNorm(
self.case["x_shape"][1], act=None, is_test=True) self.case["x_shape"][1], act=None, is_test=True
)
out = batch_norm(x) out = batch_norm(x)
self.paddle_outputs = [out] self.paddle_outputs = [out]
...@@ -234,22 +259,29 @@ class TestBatchNormInferOp(OpTest): ...@@ -234,22 +259,29 @@ class TestBatchNormInferOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("batch_norm") builder = NetBuilder("batch_norm")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
scale = builder.fill_constant([self.case["x_shape"][1]], 1.0, 'scale', "x",
'float32') )
bias = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'bias', scale = builder.fill_constant(
'float32') [self.case["x_shape"][1]], 1.0, 'scale', 'float32'
mean = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'mean', )
'float32') bias = builder.fill_constant(
variance = builder.fill_constant([self.case["x_shape"][1]], 1.0, [self.case["x_shape"][1]], 0.0, 'bias', 'float32'
'variance', 'float32') )
mean = builder.fill_constant(
[self.case["x_shape"][1]], 0.0, 'mean', 'float32'
)
variance = builder.fill_constant(
[self.case["x_shape"][1]], 1.0, 'variance', 'float32'
)
out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False) out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False)
prog = builder.build() prog = builder.build()
forward_res = self.get_cinn_output( forward_res = self.get_cinn_output(
prog, target, [x], [self.x_np], out, passes=[]) prog, target, [x], [self.x_np], out, passes=[]
)
self.cinn_outputs = [forward_res[0]] self.cinn_outputs = [forward_res[0]]
def test_check_results(self): def test_check_results(self):
...@@ -273,10 +305,7 @@ class TestBatchNormInferOpAll(TestCaseHelper): ...@@ -273,10 +305,7 @@ class TestBatchNormInferOpAll(TestCaseHelper):
}, },
] ]
self.dtypes = [ self.dtypes = [
{ {"x_dtype": "float32", "max_relative_error": 1e-5},
"x_dtype": "float32",
"max_relative_error": 1e-5
},
] ]
self.attrs = [] self.attrs = []
......
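
With scale filled with 1.0 and bias/mean with 0.0, the CINN program above reduces training-mode batch norm to normalizing x by its per-channel batch statistics. A numpy sketch of that forward computation for reference (NCHW layout as implied by the x_shape[1] channel count; the epsilon value is illustrative, not taken from the test):

import numpy as np

def batch_norm_train(x, eps=1e-5):
    # Statistics over every axis except the channel axis (axis 1).
    mean = x.mean(axis=(0, 2, 3), keepdims=True)
    var = x.var(axis=(0, 2, 3), keepdims=True)
    return (x - mean) / np.sqrt(var + eps)  # scale=1, bias=0

x = np.random.random([2, 3, 4, 4]).astype("float32")
print(batch_norm_train(x).mean(axis=(0, 2, 3)))  # ~0 per channel
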
...@@ -23,8 +23,9 @@ from cinn.frontend import * ...@@ -23,8 +23,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBinaryOp(OpTest): class TestBinaryOp(OpTest):
def setUp(self): def setUp(self):
self.init_case() self.init_case()
...@@ -55,18 +56,24 @@ class TestBinaryOp(OpTest): ...@@ -55,18 +56,24 @@ class TestBinaryOp(OpTest):
def get_unsqueeze_axis(x_rank, y_rank, axis): def get_unsqueeze_axis(x_rank, y_rank, axis):
self.assertTrue( self.assertTrue(
x_rank >= y_rank, x_rank >= y_rank,
"The rank of x should be greater or equal to that of y.") "The rank of x should be greater or equal to that of y.",
)
axis = axis if axis >= 0 else x_rank - y_rank axis = axis if axis >= 0 else x_rank - y_rank
unsqueeze_axis = np.arange(0, axis).tolist() + np.arange( unsqueeze_axis = (
axis + y_rank, x_rank).tolist() np.arange(0, axis).tolist()
+ np.arange(axis + y_rank, x_rank).tolist()
)
return unsqueeze_axis return unsqueeze_axis
unsqueeze_axis = get_unsqueeze_axis( unsqueeze_axis = get_unsqueeze_axis(
len(self.inputs["x"].shape), len(self.inputs["y"].shape), len(self.inputs["x"].shape), len(self.inputs["y"].shape), self.axis
self.axis) )
y_t = paddle.unsqueeze( y_t = (
y, axis=unsqueeze_axis) if len(unsqueeze_axis) > 0 else y paddle.unsqueeze(y, axis=unsqueeze_axis)
if len(unsqueeze_axis) > 0
else y
)
out = self.paddle_func(x, y_t) out = self.paddle_func(x, y_t)
self.paddle_outputs = [out] self.paddle_outputs = [out]
...@@ -75,15 +82,20 @@ class TestBinaryOp(OpTest): ...@@ -75,15 +82,20 @@ class TestBinaryOp(OpTest):
builder = NetBuilder("binary_elementwise_test") builder = NetBuilder("binary_elementwise_test")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype), self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x") self.inputs["x"].shape,
"x",
)
y = builder.create_input( y = builder.create_input(
self.nptype2cinntype(self.inputs["y"].dtype), self.nptype2cinntype(self.inputs["y"].dtype),
self.inputs["y"].shape, "y") self.inputs["y"].shape,
"y",
)
out = self.cinn_func(builder, x, y, axis=self.axis) out = self.cinn_func(builder, x, y, axis=self.axis)
prog = builder.build() prog = builder.build()
res = self.get_cinn_output(prog, target, [x, y], res = self.get_cinn_output(
[self.inputs["x"], self.inputs["y"]], [out]) prog, target, [x, y], [self.inputs["x"], self.inputs["y"]], [out]
)
self.cinn_outputs = res self.cinn_outputs = res
...@@ -158,13 +170,17 @@ class TestMultiplyOp(TestBinaryOp): ...@@ -158,13 +170,17 @@ class TestMultiplyOp(TestBinaryOp):
class TestFloorDivideOp(TestBinaryOp): class TestFloorDivideOp(TestBinaryOp):
def get_x_data(self): def get_x_data(self):
# avoid randomly generating 0 # avoid randomly generating 0
# avoid randomly generating 0 # avoid randomly generating 0
return self.random([32, 64], 'int32', 1, 100) * np.random.choice( return (
[-1, 1], [1])[0] self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
def get_y_data(self): def get_y_data(self):
# avoid randomly generating 0 # avoid randomly generating 0
return self.random([32, 64], 'int32', 1, 100) * np.random.choice( return (
[-1, 1], [1])[0] self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
def paddle_func(self, x, y): def paddle_func(self, x, y):
return paddle.floor_divide(x, y) return paddle.floor_divide(x, y)
...@@ -183,12 +199,16 @@ class TestModOp(TestBinaryOp): ...@@ -183,12 +199,16 @@ class TestModOp(TestBinaryOp):
class TestModCase1(TestModOp): class TestModCase1(TestModOp):
def get_x_data(self): def get_x_data(self):
return self.random([32, 64], 'int32', 1, 100) * np.random.choice( return (
[-1, 1], [1])[0] self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
def get_y_data(self): def get_y_data(self):
return self.random([32, 64], 'int32', 1, 100) * np.random.choice( return (
[-1, 1], [1])[0] self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
class TestRemainderOp(TestBinaryOp): class TestRemainderOp(TestBinaryOp):
...@@ -202,12 +222,16 @@ class TestRemainderOp(TestBinaryOp): ...@@ -202,12 +222,16 @@ class TestRemainderOp(TestBinaryOp):
class TestRemainderCase1(TestRemainderOp): class TestRemainderCase1(TestRemainderOp):
def get_x_data(self): def get_x_data(self):
return self.random([32, 64], 'int32', 1, 100) * np.random.choice( return (
[-1, 1], [1])[0] self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
def get_y_data(self): def get_y_data(self):
return self.random([32, 64], 'int32', 1, 100) * np.random.choice( return (
[-1, 1], [1])[0] self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
class TestMaxOp(TestBinaryOp): class TestMaxOp(TestBinaryOp):
......
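
The get_unsqueeze_axis helper in the binary test above computes which axes a lower-rank y must gain so that its dimensions line up with x starting at position axis. A small standalone run of the same logic:

import numpy as np

def get_unsqueeze_axis(x_rank, y_rank, axis):
    axis = axis if axis >= 0 else x_rank - y_rank
    return np.arange(0, axis).tolist() + np.arange(axis + y_rank, x_rank).tolist()

# x rank 4, y rank 2, axis=1: y of shape [3, 4] is unsqueezed to [1, 3, 4, 1].
print(get_unsqueeze_axis(4, 2, 1))   # [0, 3]
# axis=-1 aligns y with the trailing dims of x: y becomes [1, 1, 3, 4].
print(get_unsqueeze_axis(4, 2, -1))  # [0, 1]
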
...@@ -23,8 +23,9 @@ from cinn.common import * ...@@ -23,8 +23,9 @@ from cinn.common import *
from struct import pack, unpack from struct import pack, unpack
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBitcastConvertOp(OpTest): class TestBitcastConvertOp(OpTest):
def setUp(self): def setUp(self):
self.init_case() self.init_case()
...@@ -35,9 +36,10 @@ class TestBitcastConvertOp(OpTest): ...@@ -35,9 +36,10 @@ class TestBitcastConvertOp(OpTest):
packed = pack(data.size * 'i', *data.flatten()) packed = pack(data.size * 'i', *data.flatten())
self.inputs = {"x": data} self.inputs = {"x": data}
self.outputs = { self.outputs = {
"y": np.array(unpack('12B', packed), dtype='uint8').reshape((3, 1, "y": np.array(unpack('12B', packed), dtype='uint8').reshape(
4)), (3, 1, 4)
"output_type": "uint8" ),
"output_type": "uint8",
} }
def build_paddle_program(self, target): def build_paddle_program(self, target):
...@@ -48,11 +50,12 @@ class TestBitcastConvertOp(OpTest): ...@@ -48,11 +50,12 @@ class TestBitcastConvertOp(OpTest):
builder = NetBuilder("bitcast_convert") builder = NetBuilder("bitcast_convert")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype), self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x") self.inputs["x"].shape,
"x",
)
out = builder.bitcast_convert(x, self.outputs["output_type"]) out = builder.bitcast_convert(x, self.outputs["output_type"])
prog = builder.build() prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
[out])
self.cinn_outputs = [res[0]] self.cinn_outputs = [res[0]]
def test_check_results(self): def test_check_results(self):
...@@ -67,7 +70,7 @@ class TestBitcastConvertCase1(TestBitcastConvertOp): ...@@ -67,7 +70,7 @@ class TestBitcastConvertCase1(TestBitcastConvertOp):
self.inputs = {"x": data} self.inputs = {"x": data}
self.outputs = { self.outputs = {
"y": np.array(unpack('4i', packed), dtype='int32').reshape((4)), "y": np.array(unpack('4i', packed), dtype='int32').reshape((4)),
"output_type": "int32" "output_type": "int32",
} }
...@@ -78,9 +81,10 @@ class TestBitcastConvertCase2(TestBitcastConvertOp): ...@@ -78,9 +81,10 @@ class TestBitcastConvertCase2(TestBitcastConvertOp):
packed = pack(data.size * 'f', *data.flatten()) packed = pack(data.size * 'f', *data.flatten())
self.inputs = {"x": data} self.inputs = {"x": data}
self.outputs = { self.outputs = {
"y": np.array(unpack('12d', packed), dtype='float64').reshape((4, "y": np.array(unpack('12d', packed), dtype='float64').reshape(
3)), (4, 3)
"output_type": "float64" ),
"output_type": "float64",
} }
...@@ -91,11 +95,10 @@ class TestBitcastConvertCase3(TestBitcastConvertOp): ...@@ -91,11 +95,10 @@ class TestBitcastConvertCase3(TestBitcastConvertOp):
packed = pack(data.size * 'f', *data.flatten()) packed = pack(data.size * 'f', *data.flatten())
self.inputs = {"x": data} self.inputs = {"x": data}
self.outputs = { self.outputs = {
"y": "y": np.array(unpack('48H', packed), dtype='uint16').reshape(
np.array(unpack('48H', packed), dtype='uint16').reshape((4, 3, 2, (4, 3, 2, 2)
2)), ),
"output_type": "output_type": "uint16",
"uint16"
} }
......
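
The pack/unpack round-trips above reinterpret raw bytes rather than converting values, so the expected outputs can be cross-checked with numpy's ndarray.view, where the trailing axis scales by the ratio of the element sizes. A sketch matching the first case (int32 to uint8, shapes as in the test):

import numpy as np

data = np.arange(3, dtype="int32").reshape([3, 1])
as_uint8 = data.view("uint8").reshape([3, 1, 4])  # 4 bytes per int32
print(as_uint8.shape)  # (3, 1, 4), the same layout the '12B' unpack produces
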
...@@ -20,8 +20,9 @@ from cinn.frontend import * ...@@ -20,8 +20,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBitwiseOp(OpTest): class TestBitwiseOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -33,19 +34,23 @@ class TestBitwiseOp(OpTest): ...@@ -33,19 +34,23 @@ class TestBitwiseOp(OpTest):
self.x_np = np.full( self.x_np = np.full(
shape=self.case["x_shape"], shape=self.case["x_shape"],
fill_value=np.inf, fill_value=np.inf,
dtype=self.case["dtype"]) dtype=self.case["dtype"],
)
# Test with nan values # Test with nan values
elif "with_nan" in self.case: elif "with_nan" in self.case:
self.x_np = np.full( self.x_np = np.full(
shape=self.case["x_shape"], shape=self.case["x_shape"],
fill_value=np.nan, fill_value=np.nan,
dtype=self.case["dtype"]) dtype=self.case["dtype"],
)
else: else:
self.x_np = self.random( self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["dtype"]) shape=self.case["x_shape"], dtype=self.case["dtype"]
)
if self.case["op_type"] != "not": if self.case["op_type"] != "not":
self.y_np = self.random( self.y_np = self.random(
shape=self.case["y_shape"], dtype=self.case["dtype"]) shape=self.case["y_shape"], dtype=self.case["dtype"]
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False) x = paddle.to_tensor(self.x_np, stop_gradient=False)
...@@ -66,12 +71,14 @@ class TestBitwiseOp(OpTest): ...@@ -66,12 +71,14 @@ class TestBitwiseOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("bitwise") builder = NetBuilder("bitwise")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["dtype"]), self.case["x_shape"], "x"
"x") )
if self.case["op_type"] != "not": if self.case["op_type"] != "not":
y = builder.create_input( y = builder.create_input(
self.nptype2cinntype(self.case["dtype"]), self.case["y_shape"], self.nptype2cinntype(self.case["dtype"]),
"y") self.case["y_shape"],
"y",
)
if self.case["op_type"] == "and": if self.case["op_type"] == "and":
out = builder.bitwise_and(x, y) out = builder.bitwise_and(x, y)
elif self.case["op_type"] == "or": elif self.case["op_type"] == "or":
...@@ -84,15 +91,19 @@ class TestBitwiseOp(OpTest): ...@@ -84,15 +91,19 @@ class TestBitwiseOp(OpTest):
out = builder.identity(x) out = builder.identity(x)
prog = builder.build() prog = builder.build()
if self.case["op_type"] != "not": if self.case["op_type"] != "not":
res = self.get_cinn_output(prog, target, [x, y], res = self.get_cinn_output(
[self.x_np, self.y_np], [out]) prog, target, [x, y], [self.x_np, self.y_np], [out]
)
else: else:
res = self.get_cinn_output(prog, target, [x], [self.x_np], [out]) res = self.get_cinn_output(prog, target, [x], [self.x_np], [out])
self.cinn_outputs = res self.cinn_outputs = res
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -131,23 +142,13 @@ class TestBitwiseOpShape(TestCaseHelper): ...@@ -131,23 +142,13 @@ class TestBitwiseOpShape(TestCaseHelper):
}, },
] ]
self.dtypes = [ self.dtypes = [
{ {"dtype": "int32"},
"dtype": "int32"
},
] ]
self.attrs = [ self.attrs = [
{ {"op_type": "and"},
"op_type": "and" {"op_type": "or"},
}, {"op_type": "xor"},
{ {"op_type": "not"},
"op_type": "or"
},
{
"op_type": "xor"
},
{
"op_type": "not"
},
] ]
...@@ -162,38 +163,18 @@ class TestBitwiseOpDtype(TestCaseHelper): ...@@ -162,38 +163,18 @@ class TestBitwiseOpDtype(TestCaseHelper):
}, },
] ]
self.dtypes = [ self.dtypes = [
{ {"dtype": "bool"},
"dtype": "bool" {"dtype": "uint8"},
}, {"dtype": "int8"},
{ {"dtype": "int16"},
"dtype": "uint8" {"dtype": "int32"},
}, {"dtype": "int64"},
{
"dtype": "int8"
},
{
"dtype": "int16"
},
{
"dtype": "int32"
},
{
"dtype": "int64"
},
] ]
self.attrs = [ self.attrs = [
{ {"op_type": "and"},
"op_type": "and" {"op_type": "or"},
}, {"op_type": "xor"},
{ {"op_type": "not"},
"op_type": "or"
},
{
"op_type": "xor"
},
{
"op_type": "not"
},
] ]
......
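
The dtype sweep above covers bool plus the integer types for each of and/or/xor/not. numpy's operators have the same semantics, including bitwise-not on bool acting as logical not, which is a quick way to eyeball expected values:

import numpy as np

a = np.array([0b1100], dtype="int32")
b = np.array([0b1010], dtype="int32")
print(a & b, a | b, a ^ b, ~a)  # [8] [14] [6] [-13]

flags = np.array([True, False])
print(~flags)  # [False  True], bitwise not on bool is logical not
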
...@@ -45,11 +45,11 @@ class TestBroadcastToOp(OpTest): ...@@ -45,11 +45,11 @@ class TestBroadcastToOp(OpTest):
builder = NetBuilder("BroadcastTo") builder = NetBuilder("BroadcastTo")
x = builder.create_input(Float(32), self.inputs["x"].shape, "x") x = builder.create_input(Float(32), self.inputs["x"].shape, "x")
out = builder.broadcast_to( out = builder.broadcast_to(
x, out_shape=self.out_shape, broadcast_axes=self.broadcast_axes) x, out_shape=self.out_shape, broadcast_axes=self.broadcast_axes
)
prog = builder.build() prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
[out])
self.cinn_outputs = res self.cinn_outputs = res
...@@ -119,8 +119,7 @@ class TestBroadcastToOpNoAxes(OpTest): ...@@ -119,8 +119,7 @@ class TestBroadcastToOpNoAxes(OpTest):
out = builder.broadcast_to(x, out_shape=self.out_shape) out = builder.broadcast_to(x, out_shape=self.out_shape)
prog = builder.build() prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
[out])
self.cinn_outputs = res self.cinn_outputs = res
......
...@@ -21,8 +21,9 @@ from cinn.frontend import * ...@@ -21,8 +21,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBroadcastToOp(OpTest): class TestBroadcastToOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -30,7 +31,8 @@ class TestBroadcastToOp(OpTest): ...@@ -30,7 +31,8 @@ class TestBroadcastToOp(OpTest):
def prepare_inputs(self): def prepare_inputs(self):
self.x_np = self.random( self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"]) shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=True) x = paddle.to_tensor(self.x_np, stop_gradient=True)
...@@ -41,12 +43,15 @@ class TestBroadcastToOp(OpTest): ...@@ -41,12 +43,15 @@ class TestBroadcastToOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("BroadcastTo") builder = NetBuilder("BroadcastTo")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
out = builder.broadcast_to( out = builder.broadcast_to(
x, x,
out_shape=self.case["d_shape"], out_shape=self.case["d_shape"],
broadcast_axes=self.case["broadcast_axes"]) broadcast_axes=self.case["broadcast_axes"],
)
prog = builder.build() prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.x_np], [out]) res = self.get_cinn_output(prog, target, [x], [self.x_np], [out])
...@@ -54,8 +59,11 @@ class TestBroadcastToOp(OpTest): ...@@ -54,8 +59,11 @@ class TestBroadcastToOp(OpTest):
self.cinn_outputs = [res[0]] self.cinn_outputs = [res[0]]
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -113,9 +121,9 @@ class TestBroadcastToAllTwo(TestCaseHelper): ...@@ -113,9 +121,9 @@ class TestBroadcastToAllTwo(TestCaseHelper):
{ {
"x_dtype": "bool", "x_dtype": "bool",
}, },
#{ # {
# "x_dtype": "int8", # "x_dtype": "int8",
#}, # },
{ {
"x_dtype": "int32", "x_dtype": "int32",
}, },
...@@ -142,7 +150,8 @@ class TestBroadcastToOpNoAxes(OpTest): ...@@ -142,7 +150,8 @@ class TestBroadcastToOpNoAxes(OpTest):
def prepare_inputs(self): def prepare_inputs(self):
self.x_np = self.random( self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"]) shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=True) x = paddle.to_tensor(self.x_np, stop_gradient=True)
...@@ -153,8 +162,10 @@ class TestBroadcastToOpNoAxes(OpTest): ...@@ -153,8 +162,10 @@ class TestBroadcastToOpNoAxes(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("BroadcastTo") builder = NetBuilder("BroadcastTo")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
out = builder.broadcast_to(x, out_shape=self.case["d_shape"]) out = builder.broadcast_to(x, out_shape=self.case["d_shape"])
prog = builder.build() prog = builder.build()
...@@ -163,8 +174,11 @@ class TestBroadcastToOpNoAxes(OpTest): ...@@ -163,8 +174,11 @@ class TestBroadcastToOpNoAxes(OpTest):
self.cinn_outputs = [res[0]] self.cinn_outputs = [res[0]]
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -205,10 +219,10 @@ class TestBroadcastToOpNoAxesAllOne(TestCaseHelper): ...@@ -205,10 +219,10 @@ class TestBroadcastToOpNoAxesAllOne(TestCaseHelper):
"x_shape": [64, 32, 16, 8], "x_shape": [64, 32, 16, 8],
"d_shape": [128, 64, 32, 16, 8], "d_shape": [128, 64, 32, 16, 8],
}, },
#{ # {
# "x_shape": [128, 64, 32, 16, 8], # "x_shape": [128, 64, 32, 16, 8],
# "d_shape": [256, 128, 64, 32, 16, 8], # "d_shape": [256, 128, 64, 32, 16, 8],
#}, # },
] ]
self.dtypes = [ self.dtypes = [
{ {
......
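
In the NoAxes variants above, broadcast_to falls back to numpy-style alignment of trailing dimensions, so np.broadcast_to can serve as a quick sanity check for the shape pairs in the case tables (the CINN form with explicit broadcast_axes is more general and is not modeled here):

import numpy as np

x = np.random.random([64, 32, 16, 8]).astype("float32")
# Trailing dims match and a leading 128 is added: [64,32,16,8] -> [128,64,32,16,8]
y = np.broadcast_to(x, [128, 64, 32, 16, 8])
print(y.shape)  # (128, 64, 32, 16, 8)
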
...@@ -24,8 +24,9 @@ from cinn.frontend import * ...@@ -24,8 +24,9 @@ from cinn.frontend import *
from cinn.common import * from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestCastOp(OpTest): class TestCastOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -33,7 +34,8 @@ class TestCastOp(OpTest): ...@@ -33,7 +34,8 @@ class TestCastOp(OpTest):
def prepare_inputs(self): def prepare_inputs(self):
self.x_np = self.random( self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"]) shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target): def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=True) x = paddle.to_tensor(self.x_np, stop_gradient=True)
...@@ -46,8 +48,10 @@ class TestCastOp(OpTest): ...@@ -46,8 +48,10 @@ class TestCastOp(OpTest):
def build_cinn_program(self, target): def build_cinn_program(self, target):
builder = NetBuilder("cast") builder = NetBuilder("cast")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"], self.nptype2cinntype(self.case["x_dtype"]),
"x") self.case["x_shape"],
"x",
)
out = builder.cast(x, self.case["d_dtype"]) out = builder.cast(x, self.case["d_dtype"])
prog = builder.build() prog = builder.build()
...@@ -56,8 +60,11 @@ class TestCastOp(OpTest): ...@@ -56,8 +60,11 @@ class TestCastOp(OpTest):
self.cinn_outputs = [res[0]] self.cinn_outputs = [res[0]]
def test_check_results(self): def test_check_results(self):
max_relative_error = self.case[ max_relative_error = (
"max_relative_error"] if "max_relative_error" in self.case else 1e-5 self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error) self.check_outputs_and_grads(max_relative_error=max_relative_error)
...@@ -79,21 +86,27 @@ class TestCastShape(TestCaseHelper): ...@@ -79,21 +86,27 @@ class TestCastShape(TestCaseHelper):
"x_shape": [16, 8, 4, 2], "x_shape": [16, 8, 4, 2],
}, },
] ]
self.dtypes = [{ self.dtypes = [
"x_dtype": "float32", {
}] "x_dtype": "float32",
self.attrs = [{ }
"d_dtype": "float64", ]
}] self.attrs = [
{
"d_dtype": "float64",
}
]
class TestCastDtype(TestCaseHelper): class TestCastDtype(TestCaseHelper):
def init_attrs(self): def init_attrs(self):
self.class_name = "TestCastOpCase" self.class_name = "TestCastOpCase"
self.cls = TestCastOp self.cls = TestCastOp
self.inputs = [{ self.inputs = [
"x_shape": [32, 64], {
}] "x_shape": [32, 64],
}
]
self.dtypes = [ self.dtypes = [
{ {
"x_dtype": "bool", "x_dtype": "bool",
...@@ -101,19 +114,12 @@ class TestCastDtype(TestCaseHelper): ...@@ -101,19 +114,12 @@ class TestCastDtype(TestCaseHelper):
{ {
"x_dtype": "int8", "x_dtype": "int8",
}, },
{ {"x_dtype": "int16"},
"x_dtype": "int16"
},
{ {
"x_dtype": "int32", "x_dtype": "int32",
}, },
{ {"x_dtype": "int64"},
"x_dtype": "int64" {"x_dtype": "float16", "max_relative_error": 1e-3},
},
{
"x_dtype": "float16",
"max_relative_error": 1e-3
},
{ {
"x_dtype": "float32", "x_dtype": "float32",
}, },
...@@ -128,19 +134,12 @@ class TestCastDtype(TestCaseHelper): ...@@ -128,19 +134,12 @@ class TestCastDtype(TestCaseHelper):
{ {
"d_dtype": "int8", "d_dtype": "int8",
}, },
{ {"d_dtype": "int16"},
"d_dtype": "int16"
},
{ {
"d_dtype": "int32", "d_dtype": "int32",
}, },
{ {"d_dtype": "int64"},
"d_dtype": "int64" {"d_dtype": "float16", "max_relative_error": 1e-3},
},
{
"d_dtype": "float16",
"max_relative_error": 1e-3
},
{ {
"d_dtype": "float32", "d_dtype": "float32",
}, },
......
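
The cast sweep pairs every source dtype with every destination dtype. For the float-to-int combinations, numpy's astype (the usual reference for such casts) truncates toward zero, which is worth keeping in mind when reading expected outputs:

import numpy as np

x = np.array([1.9, -1.9], dtype="float32")
print(x.astype("int32"))  # [ 1 -1], truncation toward zero
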
...@@ -22,8 +22,9 @@ from op_test import OpTest, OpTestTool ...@@ -22,8 +22,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestCbrtOp(OpTest): class TestCbrtOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -32,8 +33,9 @@ class TestCbrtOp(OpTest): ...@@ -32,8 +33,9 @@ class TestCbrtOp(OpTest):
def prepare_inputs(self): def prepare_inputs(self):
self.inputs = { self.inputs = {
"x": "x": self.random(
self.random(self.case["shape"], self.case["dtype"], -100.0, 100.0), self.case["shape"], self.case["dtype"], -100.0, 100.0
),
} }
def build_paddle_program(self, target): def build_paddle_program(self, target):
...@@ -45,18 +47,20 @@ class TestCbrtOp(OpTest): ...@@ -45,18 +47,20 @@ class TestCbrtOp(OpTest):
builder = NetBuilder("cbrt") builder = NetBuilder("cbrt")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype), self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x") self.inputs["x"].shape,
"x",
)
out = builder.cbrt(x) out = builder.cbrt(x)
prog = builder.build() prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
[out])
self.cinn_outputs = res self.cinn_outputs = res
def test_check_results(self): def test_check_results(self):
self.check_outputs_and_grads( self.check_outputs_and_grads(
max_relative_error=1e-3, max_absolute_error=1e-3) max_relative_error=1e-3, max_absolute_error=1e-3
)
class TestCbrtOpShape(TestCaseHelper): class TestCbrtOpShape(TestCaseHelper):
...@@ -102,9 +106,7 @@ class TestCbrtOpShape(TestCaseHelper): ...@@ -102,9 +106,7 @@ class TestCbrtOpShape(TestCaseHelper):
}, },
] ]
self.dtypes = [ self.dtypes = [
{ {"dtype": "float32"},
"dtype": "float32"
},
] ]
self.attrs = [] self.attrs = []
...@@ -125,15 +127,9 @@ class TestCbrtOpDtype(TestCaseHelper): ...@@ -125,15 +127,9 @@ class TestCbrtOpDtype(TestCaseHelper):
}, },
] ]
self.dtypes = [ self.dtypes = [
{ {"dtype": "float16"},
"dtype": "float16" {"dtype": "float32"},
}, {"dtype": "float64"},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
] ]
self.attrs = [] self.attrs = []
......
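
The cbrt test samples from [-100, 100] because the real cube root, unlike sqrt, is defined for negative inputs. numpy's cbrt shows the expected sign behavior:

import numpy as np

print(np.cbrt(np.array([-8.0, 27.0])))  # [-2.  3.]
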
...@@ -21,8 +21,9 @@ from op_test import OpTest, OpTestTool ...@@ -21,8 +21,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(), @OpTestTool.skip_if(
"x86 test will be skipped due to timeout.") not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestCeilOp(OpTest): class TestCeilOp(OpTest):
def setUp(self): def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}") print(f"\nRunning {self.__class__.__name__}: {self.case}")
...@@ -31,8 +32,9 @@ class TestCeilOp(OpTest): ...@@ -31,8 +32,9 @@ class TestCeilOp(OpTest):
def prepare_inputs(self): def prepare_inputs(self):
self.inputs = { self.inputs = {
"x": "x": self.random(
self.random(self.case["shape"], self.case["dtype"], -100.0, 100.0), self.case["shape"], self.case["dtype"], -100.0, 100.0
),
} }
def build_paddle_program(self, target): def build_paddle_program(self, target):
...@@ -47,12 +49,13 @@ class TestCeilOp(OpTest): ...@@ -47,12 +49,13 @@ class TestCeilOp(OpTest):
builder = NetBuilder("ceil") builder = NetBuilder("ceil")
x = builder.create_input( x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype), self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x") self.inputs["x"].shape,
"x",
)
out = builder.ceil(x) out = builder.ceil(x)
prog = builder.build() prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
[out])
self.cinn_outputs = res self.cinn_outputs = res
...@@ -103,9 +106,7 @@ class TestCeilOpShape(TestCaseHelper): ...@@ -103,9 +106,7 @@ class TestCeilOpShape(TestCaseHelper):
}, },
] ]
self.dtypes = [ self.dtypes = [
{ {"dtype": "float32"},
"dtype": "float32"
},
] ]
self.attrs = [] self.attrs = []
...@@ -126,15 +127,9 @@ class TestCeilOpDtype(TestCaseHelper): ...@@ -126,15 +127,9 @@ class TestCeilOpDtype(TestCaseHelper):
}, },
] ]
self.dtypes = [ self.dtypes = [
{ {"dtype": "float16"},
"dtype": "float16" {"dtype": "float32"},
}, {"dtype": "float64"},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
] ]
self.attrs = [] self.attrs = []
......
@@ -22,8 +22,9 @@ from op_test import OpTest, OpTestTool
 from op_test_helper import TestCaseHelper


-@OpTestTool.skip_if(not is_compiled_with_cuda(),
-                    "x86 test will be skipped due to timeout.")
+@OpTestTool.skip_if(
+    not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
+)
 class TestCholeskyOp(OpTest):
     def setUp(self):
         print(f"\nRunning {self.__class__.__name__}: {self.case}")
@@ -34,14 +35,16 @@ class TestCholeskyOp(OpTest):
         if "batch_dim" in self.case and self.case["batch_dim"] > 0:
             x = []
             for _ in range(self.case["batch_dim"]):
-                matrix = self.random(self.case["shape"], self.case["dtype"],
-                                     -1.0, 1.0)
+                matrix = self.random(
+                    self.case["shape"], self.case["dtype"], -1.0, 1.0
+                )
                 matrix_t = np.transpose(matrix, [1, 0])
                 x.append(np.dot(matrix, matrix_t))
             x = np.stack(x)
         else:
-            matrix = self.random(self.case["shape"], self.case["dtype"], -1.0,
-                                 1.0)
+            matrix = self.random(
+                self.case["shape"], self.case["dtype"], -1.0, 1.0
+            )
             matrix_t = np.transpose(matrix, [1, 0])
             x = np.dot(matrix, matrix_t)
         self.inputs = {"x": x}
@@ -56,11 +59,14 @@ class TestCholeskyOp(OpTest):
         builder = NetBuilder("cholesky")
         x = builder.create_input(
             self.nptype2cinntype(self.inputs["x"].dtype),
-            self.inputs["x"].shape, "x")
+            self.inputs["x"].shape,
+            "x",
+        )
         out = builder.cholesky(x, self.upper)
         prog = builder.build()
         res = self.get_cinn_output(
-            prog, target, [x], [self.inputs["x"]], [out], passes=[])
+            prog, target, [x], [self.inputs["x"]], [out], passes=[]
+        )
         self.cinn_outputs = [res[0]]

     def test_check_results(self):
@@ -83,14 +89,10 @@ class TestCholeskyOpShape(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float32"
-            },
+            {"dtype": "float32"},
         ]
         self.attrs = [
-            {
-                "upper": False
-            },
+            {"upper": False},
         ]
@@ -107,23 +109,12 @@ class TestCholeskyOpLargeShape(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float64"
-            },
+            {"dtype": "float64"},
         ]
         self.attrs = [
-            {
-                "upper": False,
-                "batch_dim": 2
-            },
-            {
-                "upper": False,
-                "batch_dim": 4
-            },
-            {
-                "upper": True,
-                "batch_dim": 8
-            },
+            {"upper": False, "batch_dim": 2},
+            {"upper": False, "batch_dim": 4},
+            {"upper": True, "batch_dim": 8},
         ]
@@ -143,17 +134,11 @@ class TestCholeskyOpDtype(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float32"
-            },
-            {
-                "dtype": "float64"
-            },
+            {"dtype": "float32"},
+            {"dtype": "float64"},
         ]
         self.attrs = [
-            {
-                "upper": False
-            },
+            {"upper": False},
         ]
@@ -173,23 +158,12 @@ class TestCholeskyOpBatch(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float32"
-            },
+            {"dtype": "float32"},
         ]
         self.attrs = [
-            {
-                "upper": False,
-                "batch_dim": 1
-            },
-            {
-                "upper": False,
-                "batch_dim": 4
-            },
-            {
-                "upper": False,
-                "batch_dim": 8
-            },
+            {"upper": False, "batch_dim": 1},
+            {"upper": False, "batch_dim": 4},
+            {"upper": False, "batch_dim": 8},
         ]
@@ -209,12 +183,8 @@ class TestCholeskyOpAttrs(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float32"
-            },
-            {
-                "dtype": "float64"
-            },
+            {"dtype": "float32"},
+            {"dtype": "float64"},
         ]
         self.attrs = [
             {
......
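The `prepare_inputs` hunks above keep the test's key numerical trick intact: multiplying a random matrix by its own transpose. `A @ A.T` is always symmetric and, when `A` has full rank, positive definite, which is exactly what a Cholesky factorization requires. A quick standalone NumPy check of the same recipe (a sketch, not test code):

import numpy as np

rng = np.random.default_rng(0)
a = rng.uniform(-1.0, 1.0, size=(4, 4))  # same value range the test draws from
spd = a @ a.T                  # symmetric, positive definite for full-rank a

chol = np.linalg.cholesky(spd)            # lower-triangular Cholesky factor
assert np.allclose(chol @ chol.T, spd)    # the factorization reconstructs spd
assert np.allclose(spd, spd.T)            # and the input really is symmetric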
@@ -37,7 +37,7 @@ def count_leading_zeros(integer, dtype):
     if integer < 0:
         return 0
     mask = 1 << (bits - 1)
-    integer &= (mask - 1)
+    integer &= mask - 1
     clz = 0
     while mask > 0 and integer & mask == 0:
         clz += 1
@@ -45,8 +45,9 @@ def count_leading_zeros(integer, dtype):
     return clz


-@OpTestTool.skip_if(not is_compiled_with_cuda(),
-                    "x86 test will be skipped due to timeout.")
+@OpTestTool.skip_if(
+    not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
+)
 class TestClzOp(OpTest):
     def setUp(self):
         print(f"\nRunning {self.__class__.__name__}: {self.case}")
@@ -59,8 +60,11 @@ class TestClzOp(OpTest):
         high = INT32_MAX if dtype == "int32" else INT64_MAX
         x = self.random(self.case["shape"], dtype, low=low, high=high)
         y = list(
-            map(lambda num: count_leading_zeros(num, dtype),
-                x.reshape(-1).tolist()))
+            map(
+                lambda num: count_leading_zeros(num, dtype),
+                x.reshape(-1).tolist(),
+            )
+        )
         self.inputs = {"x": x}
         self.outputs = {"y": np.array(y).reshape(x.shape).astype(dtype)}
@@ -72,11 +76,12 @@ class TestClzOp(OpTest):
         builder = NetBuilder("clz")
         x = builder.create_input(
             self.nptype2cinntype(self.inputs["x"].dtype),
-            self.inputs["x"].shape, "x")
+            self.inputs["x"].shape,
+            "x",
+        )
         out = builder.clz(x)
         prog = builder.build()
-        res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]],
-                                   [out])
+        res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])

         self.cinn_outputs = res

     def test_check_results(self):
@@ -126,12 +131,8 @@ class TestClzOpShapeDtype(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "int32"
-            },
-            {
-                "dtype": "int64"
-            },
+            {"dtype": "int32"},
+            {"dtype": "int64"},
         ]
         self.attrs = []
......
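For reference, the `count_leading_zeros` helper touched above (only the `integer &= mask - 1` line changed) walks a single-bit mask from the top bit downward. A self-contained sketch of the same algorithm, cross-checked against `int.bit_length()` — the `mask >>= 1` step falls between the visible hunks, so treat this as a reconstruction:

def count_leading_zeros(integer, bits=32):
    # Mirrors the test helper: a negative value has its sign bit set, so clz
    # is 0 by convention here.
    if integer < 0:
        return 0
    mask = 1 << (bits - 1)
    integer &= mask - 1  # drop anything at or above the top bit
    clz = 0
    while mask > 0 and integer & mask == 0:
        clz += 1
        mask >>= 1  # move the probe one bit to the right
    return clz


for n in (0, 1, 7, 2**20, 2**31 - 1):
    assert count_leading_zeros(n) == 32 - n.bit_length()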
@@ -19,8 +19,9 @@ from op_test import OpTest, OpTestTool
 from op_test_helper import TestCaseHelper


-@OpTestTool.skip_if(not is_compiled_with_cuda(),
-                    "x86 test will be skipped due to timeout.")
+@OpTestTool.skip_if(
+    not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
+)
 class TestComparisonOp(OpTest):
     def setUp(self):
         print(f"\nRunning {self.__class__.__name__}: {self.case}")
@@ -31,12 +32,12 @@ class TestComparisonOp(OpTest):
         if self.case["broadcast"]:
             self.inputs = {
                 "x": self.random(self.case["x_shape"], self.case["dtype"]),
-                "y": self.random(self.case["y_shape"], self.case["dtype"])
+                "y": self.random(self.case["y_shape"], self.case["dtype"]),
             }
         else:
             self.inputs = {
                 "x": self.random(self.case["shape"], self.case["dtype"]),
-                "y": self.random(self.case["shape"], self.case["dtype"])
+                "y": self.random(self.case["shape"], self.case["dtype"]),
             }
         self.operation = self.case["operation"]
@@ -63,10 +64,14 @@ class TestComparisonOp(OpTest):
         builder = NetBuilder("select")
         x = builder.create_input(
             self.nptype2cinntype(self.inputs["x"].dtype),
-            self.inputs["x"].shape, "x")
+            self.inputs["x"].shape,
+            "x",
+        )
         y = builder.create_input(
             self.nptype2cinntype(self.inputs["y"].dtype),
-            self.inputs["y"].shape, "y")
+            self.inputs["y"].shape,
+            "y",
+        )

         if self.operation == "equal":
             out = builder.equal(x, y)
@@ -83,8 +88,9 @@ class TestComparisonOp(OpTest):
         else:
             raise NotImplementedError

         prog = builder.build()
-        res = self.get_cinn_output(prog, target, [x, y],
-                                   [self.inputs["x"], self.inputs["y"]], [out])
+        res = self.get_cinn_output(
+            prog, target, [x, y], [self.inputs["x"], self.inputs["y"]], [out]
+        )

         self.cinn_outputs = res

     def test_check_results(self):
@@ -144,43 +150,21 @@ class TestComparisonOpShape(TestCaseHelper):
             {
                 "shape": [131072],
             },
-            {
-                "shape": [1048576]
-            },
+            {"shape": [1048576]},
             {
                 "shape": [64, 32, 16, 8, 4],
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float32"
-            },
+            {"dtype": "float32"},
         ]
         self.attrs = [
-            {
-                "operation": "equal",
-                "broadcast": False
-            },
-            {
-                "operation": "not_equal",
-                "broadcast": False
-            },
-            {
-                "operation": "greater_than",
-                "broadcast": False
-            },
-            {
-                "operation": "less_than",
-                "broadcast": False
-            },
-            {
-                "operation": "greater_equal",
-                "broadcast": False
-            },
-            {
-                "operation": "less_equal",
-                "broadcast": False
-            },
+            {"operation": "equal", "broadcast": False},
+            {"operation": "not_equal", "broadcast": False},
+            {"operation": "greater_than", "broadcast": False},
+            {"operation": "less_than", "broadcast": False},
+            {"operation": "greater_equal", "broadcast": False},
+            {"operation": "less_equal", "broadcast": False},
         ]
@@ -197,50 +181,20 @@ class TestComparisonOpDtype(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float16"
-            },
-            {
-                "dtype": "float32"
-            },
-            {
-                "dtype": "float64"
-            },
-            {
-                "dtype": "bool"
-            },
-            {
-                "dtype": "int32"
-            },
-            {
-                "dtype": "int64"
-            },
+            {"dtype": "float16"},
+            {"dtype": "float32"},
+            {"dtype": "float64"},
+            {"dtype": "bool"},
+            {"dtype": "int32"},
+            {"dtype": "int64"},
         ]
         self.attrs = [
-            {
-                "operation": "equal",
-                "broadcast": False
-            },
-            {
-                "operation": "not_equal",
-                "broadcast": False
-            },
-            {
-                "operation": "greater_than",
-                "broadcast": False
-            },
-            {
-                "operation": "less_than",
-                "broadcast": False
-            },
-            {
-                "operation": "greater_equal",
-                "broadcast": False
-            },
-            {
-                "operation": "less_equal",
-                "broadcast": False
-            },
+            {"operation": "equal", "broadcast": False},
+            {"operation": "not_equal", "broadcast": False},
+            {"operation": "greater_than", "broadcast": False},
+            {"operation": "less_than", "broadcast": False},
+            {"operation": "greater_equal", "broadcast": False},
+            {"operation": "less_equal", "broadcast": False},
         ]
@@ -319,35 +273,15 @@ class TestComparisonOpBroadcastTest(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float32"
-            },
+            {"dtype": "float32"},
         ]
         self.attrs = [
-            {
-                "operation": "equal",
-                "broadcast": True
-            },
-            {
-                "operation": "not_equal",
-                "broadcast": True
-            },
-            {
-                "operation": "greater_than",
-                "broadcast": True
-            },
-            {
-                "operation": "less_than",
-                "broadcast": True
-            },
-            {
-                "operation": "greater_equal",
-                "broadcast": True
-            },
-            {
-                "operation": "less_equal",
-                "broadcast": True
-            },
+            {"operation": "equal", "broadcast": True},
+            {"operation": "not_equal", "broadcast": True},
+            {"operation": "greater_than", "broadcast": True},
+            {"operation": "less_than", "broadcast": True},
+            {"operation": "greater_equal", "broadcast": True},
+            {"operation": "less_equal", "broadcast": True},
         ]
......
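The `broadcast` attribute above toggles between same-shape input pairs and `x_shape`/`y_shape` pairs that only line up under broadcasting. In NumPy terms (a sketch of the semantics these cases exercise, not of the CINN implementation), every comparison is elementwise, follows the usual broadcasting rules, and yields a boolean tensor:

import numpy as np

x = np.random.rand(8, 1, 5)
y = np.random.rand(8, 4, 5)

out = np.less_equal(x, y)  # x is broadcast along axis 1 to match y
assert out.shape == (8, 4, 5)
assert out.dtype == np.bool_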
@@ -21,8 +21,9 @@ from op_test import OpTest, OpTestTool
 from op_test_helper import TestCaseHelper


-@OpTestTool.skip_if(not is_compiled_with_cuda(),
-                    "x86 test will be skipped due to timeout.")
+@OpTestTool.skip_if(
+    not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
+)
 class TestConcatOp(OpTest):
     def setUp(self):
         print(f"\nRunning {self.__class__.__name__}: {self.case}")
@@ -47,7 +48,8 @@ class TestConcatOp(OpTest):
     def cinn_inputs(self, builder, inputs):
         return [
             builder.create_input(
-                self.nptype2cinntype(data.dtype), data.shape, name)
+                self.nptype2cinntype(data.dtype), data.shape, name
+            )
             for name, data in inputs.items()
         ]
@@ -67,8 +69,7 @@ class TestConcatOp(OpTest):
         input_datas = [data for _, data in self.inputs.items()]
-        res = self.get_cinn_output(prog, target, input_list, input_datas,
-                                   [out])
+        res = self.get_cinn_output(prog, target, input_list, input_datas, [out])

         self.cinn_outputs = res
@@ -119,14 +120,10 @@ class TestConcatOpShape(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float32"
-            },
+            {"dtype": "float32"},
         ]
         self.attrs = [
-            {
-                "axis": 0
-            },
+            {"axis": 0},
         ]
@@ -149,35 +146,17 @@ class TestConcatOpDtype(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float16"
-            },
-            {
-                "dtype": "float32"
-            },
-            {
-                "dtype": "float64"
-            },
-            {
-                "dtype": "bool"
-            },
-            {
-                "dtype": "uint8"
-            },
-            {
-                "dtype": "int8"
-            },
-            {
-                "dtype": "int32"
-            },
-            {
-                "dtype": "int64"
-            },
+            {"dtype": "float16"},
+            {"dtype": "float32"},
+            {"dtype": "float64"},
+            {"dtype": "bool"},
+            {"dtype": "uint8"},
+            {"dtype": "int8"},
+            {"dtype": "int32"},
+            {"dtype": "int64"},
         ]
         self.attrs = [
-            {
-                "axis": 0
-            },
+            {"axis": 0},
         ]
@@ -187,80 +166,39 @@ class TestConcatOpMultipleInputs(TestCaseHelper):
         self.cls = TestConcatOp
         self.inputs = [
             # 1D tensor with 1~4 inputs
-            {
-                "shapes": [[10]],
-                "axis": 0
-            },
-            {
-                "shapes": [[10], [6]],
-                "axis": 0
-            },
-            {
-                "shapes": [[10], [6], [8]],
-                "axis": 0
-            },
-            {
-                "shapes": [[10], [6], [10], [6]],
-                "axis": 0
-            },
+            {"shapes": [[10]], "axis": 0},
+            {"shapes": [[10], [6]], "axis": 0},
+            {"shapes": [[10], [6], [8]], "axis": 0},
+            {"shapes": [[10], [6], [10], [6]], "axis": 0},
             # 2D tensor with 1~4 inputs
-            {
-                "shapes": [[8, 5]],
-                "axis": 1
-            },
-            {
-                "shapes": [[8, 5], [8, 8]],
-                "axis": 1
-            },
-            {
-                "shapes": [[8, 5], [8, 5], [16, 5]],
-                "axis": 0
-            },
-            {
-                "shapes": [[8, 5], [8, 5], [8, 5], [8, 5]],
-                "axis": 0
-            },
+            {"shapes": [[8, 5]], "axis": 1},
+            {"shapes": [[8, 5], [8, 8]], "axis": 1},
+            {"shapes": [[8, 5], [8, 5], [16, 5]], "axis": 0},
+            {"shapes": [[8, 5], [8, 5], [8, 5], [8, 5]], "axis": 0},
             # 3D tensor with 1~4 inputs
-            {
-                "shapes": [[10, 3, 5]],
-                "axis": 0
-            },
-            {
-                "shapes": [[10, 3, 5], [10, 7, 5]],
-                "axis": 1
-            },
-            {
-                "shapes": [[10, 3, 5], [10, 3, 6], [10, 3, 7]],
-                "axis": 2
-            },
-            {
-                "shapes": [[10, 3, 5], [4, 3, 5], [2, 3, 5]],
-                "axis": 0
-            },
+            {"shapes": [[10, 3, 5]], "axis": 0},
+            {"shapes": [[10, 3, 5], [10, 7, 5]], "axis": 1},
+            {"shapes": [[10, 3, 5], [10, 3, 6], [10, 3, 7]], "axis": 2},
+            {"shapes": [[10, 3, 5], [4, 3, 5], [2, 3, 5]], "axis": 0},
             # 4D tensor with 1~4 inputs
-            {
-                "shapes": [[80, 1, 5, 7]],
-                "axis": 0
-            },
-            {
-                "shapes": [[80, 1, 5, 7], [80, 79, 5, 7]],
-                "axis": 1
-            },
+            {"shapes": [[80, 1, 5, 7]], "axis": 0},
+            {"shapes": [[80, 1, 5, 7], [80, 79, 5, 7]], "axis": 1},
             {
                 "shapes": [[80, 1, 50, 7], [80, 1, 5, 7], [80, 1, 10, 7]],
-                "axis": 2
+                "axis": 2,
             },
             {
-                "shapes": [[80, 1, 5, 17], [80, 1, 5, 27], [80, 1, 5, 37],
-                           [80, 1, 5, 47]],
-                "axis":
-                3
+                "shapes": [
+                    [80, 1, 5, 17],
+                    [80, 1, 5, 27],
+                    [80, 1, 5, 37],
+                    [80, 1, 5, 47],
+                ],
+                "axis": 3,
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float32"
-            },
+            {"dtype": "float32"},
         ]
         self.attrs = []
@@ -271,86 +209,30 @@ class TestConcatOpAttrs(TestCaseHelper):
         self.cls = TestConcatOp
         self.inputs = [
             # 1D tensor
-            {
-                "shapes": [[10], [8]],
-                "axis": 0
-            },
-            {
-                "shapes": [[10], [6]],
-                "axis": -1
-            },
+            {"shapes": [[10], [8]], "axis": 0},
+            {"shapes": [[10], [6]], "axis": -1},
             # 2D tensor
-            {
-                "shapes": [[8, 5], [10, 5]],
-                "axis": 0
-            },
-            {
-                "shapes": [[8, 5], [8, 8]],
-                "axis": 1
-            },
+            {"shapes": [[8, 5], [10, 5]], "axis": 0},
+            {"shapes": [[8, 5], [8, 8]], "axis": 1},
             # 3D tensor
-            {
-                "shapes": [[10, 3, 5], [10, 3, 5]],
-                "axis": 0
-            },
-            {
-                "shapes": [[10, 3, 5], [10, 7, 5]],
-                "axis": 1
-            },
-            {
-                "shapes": [[10, 3, 15], [10, 3, 5]],
-                "axis": 2
-            },
-            {
-                "shapes": [[10, 3, 7], [10, 3, 5]],
-                "axis": -1
-            },
-            {
-                "shapes": [[10, 3, 5], [10, 7, 5]],
-                "axis": -2
-            },
-            {
-                "shapes": [[10, 7, 5], [20, 7, 5]],
-                "axis": -3
-            },
+            {"shapes": [[10, 3, 5], [10, 3, 5]], "axis": 0},
+            {"shapes": [[10, 3, 5], [10, 7, 5]], "axis": 1},
+            {"shapes": [[10, 3, 15], [10, 3, 5]], "axis": 2},
+            {"shapes": [[10, 3, 7], [10, 3, 5]], "axis": -1},
+            {"shapes": [[10, 3, 5], [10, 7, 5]], "axis": -2},
+            {"shapes": [[10, 7, 5], [20, 7, 5]], "axis": -3},
             # 4D tensor
-            {
-                "shapes": [[80, 1, 5, 7], [80, 1, 5, 7]],
-                "axis": 0
-            },
-            {
-                "shapes": [[80, 1, 5, 7], [80, 79, 5, 7]],
-                "axis": 1
-            },
-            {
-                "shapes": [[80, 1, 5, 7], [80, 1, 10, 7]],
-                "axis": 2
-            },
-            {
-                "shapes": [[80, 1, 5, 7], [80, 1, 5, 7]],
-                "axis": 3
-            },
-            {
-                "shapes": [[80, 1, 5, 7], [80, 1, 5, 13]],
-                "axis": -1
-            },
-            {
-                "shapes": [[80, 1, 5, 7], [80, 1, 5, 7]],
-                "axis": -2
-            },
-            {
-                "shapes": [[80, 15, 5, 7], [80, 5, 5, 7]],
-                "axis": -3
-            },
-            {
-                "shapes": [[80, 1, 5, 7], [20, 1, 5, 7]],
-                "axis": -4
-            },
+            {"shapes": [[80, 1, 5, 7], [80, 1, 5, 7]], "axis": 0},
+            {"shapes": [[80, 1, 5, 7], [80, 79, 5, 7]], "axis": 1},
+            {"shapes": [[80, 1, 5, 7], [80, 1, 10, 7]], "axis": 2},
+            {"shapes": [[80, 1, 5, 7], [80, 1, 5, 7]], "axis": 3},
+            {"shapes": [[80, 1, 5, 7], [80, 1, 5, 13]], "axis": -1},
+            {"shapes": [[80, 1, 5, 7], [80, 1, 5, 7]], "axis": -2},
+            {"shapes": [[80, 15, 5, 7], [80, 5, 5, 7]], "axis": -3},
+            {"shapes": [[80, 1, 5, 7], [20, 1, 5, 7]], "axis": -4},
         ]
         self.dtypes = [
-            {
-                "dtype": "float32"
-            },
+            {"dtype": "float32"},
         ]
         self.attrs = []
......
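The `axis` values in the grids above (0 through 3 plus their negative counterparts) follow the usual concatenation rule: all inputs must match on every dimension except the concatenation axis, and a negative axis counts back from the last dimension. A NumPy sketch of both spellings:

import numpy as np

a = np.zeros((10, 3, 5))
b = np.zeros((10, 7, 5))

out = np.concatenate([a, b], axis=1)       # dims 0 and 2 must already match
assert out.shape == (10, 10, 5)
assert np.concatenate([a, b], axis=-2).shape == (10, 10, 5)  # -2 == 1 here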
@@ -21,8 +21,9 @@ from op_test import OpTest, OpTestTool
 from op_test_helper import TestCaseHelper


-@OpTestTool.skip_if(not is_compiled_with_cuda(),
-                    "x86 test will be skipped due to timeout.")
+@OpTestTool.skip_if(
+    not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
+)
 class TestConstantOp(OpTest):
     def setUp(self):
         print(f"\nRunning {self.__class__.__name__}: {self.case}")
@@ -104,9 +105,7 @@ class TestConstantOpShape(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float32"
-            },
+            {"dtype": "float32"},
         ]
         self.attrs = []
@@ -130,30 +129,14 @@ class TestConstantOpDtype(TestCaseHelper):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "float16"
-            },
-            {
-                "dtype": "float32"
-            },
-            {
-                "dtype": "float64"
-            },
-            {
-                "dtype": "bool"
-            },
-            {
-                "dtype": "uint8"
-            },
-            {
-                "dtype": "int8"
-            },
-            {
-                "dtype": "int32"
-            },
-            {
-                "dtype": "int64"
-            },
+            {"dtype": "float16"},
+            {"dtype": "float32"},
+            {"dtype": "float64"},
+            {"dtype": "bool"},
+            {"dtype": "uint8"},
+            {"dtype": "int8"},
+            {"dtype": "int32"},
+            {"dtype": "int64"},
         ]
         self.attrs = []
......
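All of these suites share the same shape: `init_attrs` fills `self.inputs`, `self.dtypes`, and `self.attrs`, and `TestCaseHelper` presumably expands them into individual cases. A toy sketch of that cartesian-product expansion (the expansion rule here is an assumption about the helper, not taken from its source):

import itertools

inputs = [{"shape": [1024]}, {"shape": [32, 64]}]
dtypes = [{"dtype": "float16"}, {"dtype": "float32"}]
attrs = []  # an empty axis of the grid contributes a single empty case

cases = [
    {**shape, **dtype, **attr}
    for shape, dtype, attr in itertools.product(inputs, dtypes, attrs or [{}])
]
assert len(cases) == 4  # 2 shapes x 2 dtypes x 1 (empty) attrs entry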
(This file's diff is collapsed.)
@@ -23,8 +23,9 @@ from cinn.frontend import *
 from cinn.common import *


-@OpTestTool.skip_if(not is_compiled_with_cuda(),
-                    "x86 test will be skipped due to timeout.")
+@OpTestTool.skip_if(
+    not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
+)
 class TestCosOp(OpTest):
     def setUp(self):
         print(f"\nRunning {self.__class__.__name__}: {self.case}")
@@ -32,7 +33,8 @@ class TestCosOp(OpTest):
     def prepare_inputs(self):
         self.x_np = self.random(
-            shape=self.case["x_shape"], dtype=self.case["x_dtype"])
+            shape=self.case["x_shape"], dtype=self.case["x_dtype"]
+        )

     def build_paddle_program(self, target):
         x = paddle.to_tensor(self.x_np, stop_gradient=True)
@@ -42,8 +44,10 @@ class TestCosOp(OpTest):
     def build_cinn_program(self, target):
         builder = NetBuilder("unary_elementwise_test")
         x = builder.create_input(
-            self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
-            "x")
+            self.nptype2cinntype(self.case["x_dtype"]),
+            self.case["x_shape"],
+            "x",
+        )
         out = builder.cos(x)
         prog = builder.build()
         res = self.get_cinn_output(prog, target, [x], [self.x_np], [out])
@@ -58,24 +62,34 @@ class TestCosOpShape(TestCaseHelper):
    def init_attrs(self):
         self.class_name = "TestCosOpShape"
         self.cls = TestCosOp
-        self.inputs = [{
-            "x_shape": [1],
-        }, {
-            "x_shape": [1024],
-        }, {
-            "x_shape": [1, 2048],
-        }, {
-            "x_shape": [1, 1, 1],
-        }, {
-            "x_shape": [32, 64],
-        }, {
-            "x_shape": [16, 8, 4, 2],
-        }, {
-            "x_shape": [16, 8, 4, 2, 1],
-        }]
-        self.dtypes = [{
-            "x_dtype": "float32",
-        }]
+        self.inputs = [
+            {
+                "x_shape": [1],
+            },
+            {
+                "x_shape": [1024],
+            },
+            {
+                "x_shape": [1, 2048],
+            },
+            {
+                "x_shape": [1, 1, 1],
+            },
+            {
+                "x_shape": [32, 64],
+            },
+            {
+                "x_shape": [16, 8, 4, 2],
+            },
+            {
+                "x_shape": [16, 8, 4, 2, 1],
+            },
+        ]
+        self.dtypes = [
+            {
+                "x_dtype": "float32",
+            }
+        ]
         self.attrs = []
@@ -83,14 +97,13 @@ class TestCosOpDtype(TestCaseHelper):
     def init_attrs(self):
         self.class_name = "TestCosOpDtype"
         self.cls = TestCosOp
-        self.inputs = [{
-            "x_shape": [32, 64],
-        }]
+        self.inputs = [
+            {
+                "x_shape": [32, 64],
+            }
+        ]
         self.dtypes = [
-            {
-                "x_dtype": "float16",
-                "max_relative_error": 1e-3
-            },
+            {"x_dtype": "float16", "max_relative_error": 1e-3},
             {
                 "x_dtype": "float32",
             },
......
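The cos hunks also show black undoing yapf's "hugging" style for list-of-dict literals: each element now opens on its own line. Both spellings below build the same Python object; the second is the post-PR layout (toy data, not the real cases):

inputs_yapf = [{
    "x_shape": [1],
}, {
    "x_shape": [1024],
}]

inputs_black = [
    {
        "x_shape": [1],
    },
    {
        "x_shape": [1024],
    },
]

assert inputs_yapf == inputs_black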
(84 more file diffs are collapsed.)