Unverified commit 9aa82278, authored by 张经纬, committed by GitHub

[CodeStyle][CINN] format python code via black (#54964)

Parent 21518d2f
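
The hunks below are the output of running the black auto-formatter over the CINN Python sources; no behavior changes are intended. For readers unfamiliar with black, here is a minimal sketch of how the same reformatting can be reproduced in memory. The source string is taken from the first hunk below; the black options are assumptions inferred from the output style of the diff (line length and disabled string normalization), not settings recorded in this commit, and the snippet is illustrative only.

# Illustrative only: reproducing black's reformatting in memory.
# Assumes black is installed; format_str and Mode are black's public API.
# line_length and string_normalization are assumptions inferred from the
# diff's output style, not values read from this commit.
import black

src = "version = subprocess.check_output(cmd, shell=True).decode('utf-8').strip().split('.')\n"
mode = black.Mode(line_length=79, string_normalization=False)
# Prints the rewrapped call chain, matching the style shown in the hunks below.
print(black.format_str(src, mode=mode))
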
......@@ -25,7 +25,7 @@ def main():
srcs = []
srcs.append('#include <absl/strings/string_view.h>')
#srcs.append('#include "paddle/cinn/backends/llvm/cinn_runtime_llvm_ir.h"\n')
# srcs.append('#include "paddle/cinn/backends/llvm/cinn_runtime_llvm_ir.h"\n')
srcs.append('namespace cinn::backends {')
srcs.append("static const absl::string_view kRuntimeLlvmIr(")
srcs.append('R"ROC(')
......@@ -36,12 +36,19 @@ def main():
srcs.append(');\n')
cmd = "{} --version".format(llvm_config)
version = subprocess.check_output(
cmd, shell=True).decode('utf-8').strip().split('.')
version = (
subprocess.check_output(cmd, shell=True)
.decode('utf-8')
.strip()
.split('.')
)
srcs.append("struct llvm_version {")
for v, n in zip(["major", "minor", "micro"], version):
srcs.append(" static constexpr int k{} = {};".format(
v.title(), ''.join(filter(str.isdigit, n))))
srcs.append(
" static constexpr int k{} = {};".format(
v.title(), ''.join(filter(str.isdigit, n))
)
)
srcs.append("};")
srcs.append('} // namespace cinn::backends')
......
......@@ -13,6 +13,7 @@
# limitations under the License.
import os
cinndir = os.path.dirname(os.path.abspath(__file__))
runtime_include_dir = os.path.join(cinndir, "libs")
cuhfile = os.path.join(runtime_include_dir, "cinn_cuda_runtime_source.cuh")
......
......@@ -48,7 +48,8 @@ class XgbCostModel(object):
samples = np.concatenate(samples, axis=0)
if isinstance(labels, list):
labels = np.concatenate(
[[y] * length for y, length in zip(labels, lengths)], axis=0)
[[y] * length for y, length in zip(labels, lengths)], axis=0
)
dmatrix = xgb.DMatrix(data=samples, label=labels)
self.booster = xgb.train(self.xgb_param, dmatrix, self.train_round)
......@@ -77,7 +78,9 @@ class XgbCostModel(object):
Args:
path(str): path to save
"""
assert self.booster is not None, "Calling save on a XgbCostModel not been trained"
assert (
self.booster is not None
), "Calling save on a XgbCostModel not been trained"
self.booster.save_model(path)
def load(self, path):
......@@ -94,5 +97,5 @@ class XgbCostModel(object):
# But we should do that here if that's changable in the future.
def update(self, samples, labels):
#xgb doesn't support incremental training, we leave this method as TODO
# xgb doesn't support incremental training, we leave this method as TODO
pass
......@@ -1032,13 +1032,15 @@ def get_package_data_and_package_dir():
package_data['paddle.libs'] += ['cinn_cuda_runtime_source.cuh']
cinn_fp16_file = (
env_dict.get("CINN_INCLUDE_DIR") + '/paddle/cinn/runtime/cuda/float16.h'
env_dict.get("CINN_INCLUDE_DIR")
+ '/paddle/cinn/runtime/cuda/float16.h'
)
if os.path.exists(cinn_fp16_file):
shutil.copy(cinn_fp16_file, libs_path)
package_data['paddle.libs'] += ['float16.h']
cinn_bf16_file = (
env_dict.get("CINN_INCLUDE_DIR") + '/paddle/cinn/runtime/cuda/bfloat16.h'
env_dict.get("CINN_INCLUDE_DIR")
+ '/paddle/cinn/runtime/cuda/bfloat16.h'
)
if os.path.exists(cinn_bf16_file):
shutil.copy(cinn_bf16_file, libs_path)
......
......@@ -51,13 +51,15 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):
cin_index = 3
filter_size_new = [
filter_size[1] * input_shape[cin_index],
filter_size[0] // groups, filter_size[2], filter_size[3]
filter_size[0] // groups,
filter_size[2],
filter_size[3],
]
else:
filter_size_new = filter_size
param = paddle.nn.initializer.NumpyArrayInitializer(
np.array(
inputs_data[1]).reshape(filter_size_new).astype("float32"))
np.array(inputs_data[1]).reshape(filter_size_new).astype("float32")
)
# filter: (c_out, c_in // group, kernel_h, kernel_w)
filter_hw = list(filter_size_new[2:4])
if data_format == "NHWC":
......@@ -78,7 +80,8 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):
dilation=dilation,
groups=groups,
param_attr=param,
data_format=data_format)
data_format=data_format,
)
exe = static.Executor(paddle.CPUPlace())
exe.run(static.default_startup_program())
......
......@@ -26,7 +26,8 @@ a = static.data(name="A", shape=[-1, size], dtype='float32')
label = static.data(name="label", shape=[size], dtype='float32')
a1 = static.nn.fc(
x=a, size=size, activation="relu", bias_attr=None, num_flatten_dims=1)
x=a, size=size, activation="relu", bias_attr=None, num_flatten_dims=1
)
cost = paddle.nn.functional.square_error_cost(a1, label)
avg_cost = paddle.mean(cost)
......
......@@ -33,7 +33,8 @@ fc_out = static.nn.fc(
size=size,
activation="relu",
bias_attr=paddle.ParamAttr(name="fc_bias"),
num_flatten_dims=1)
num_flatten_dims=1,
)
for i in range(num_layers - 1):
fc_out = static.nn.fc(
......@@ -41,7 +42,8 @@ for i in range(num_layers - 1):
size=size,
activation="relu",
bias_attr=paddle.ParamAttr(name="fc_bias"),
num_flatten_dims=1)
num_flatten_dims=1,
)
cost = paddle.nn.functional.square_error_cost(fc_out, label)
avg_cost = paddle.mean(cost)
......
......@@ -22,15 +22,19 @@ import paddle.static as static
paddle.enable_static()
resnet_input = static.data(
name="resnet_input", shape=[1, 160, 7, 7], dtype='float32')
name="resnet_input", shape=[1, 160, 7, 7], dtype='float32'
)
label = static.data(name="label", shape=[1, 960, 7, 7], dtype='float32')
d = paddle.nn.functional.relu6(resnet_input)
f = static.nn.conv2d(
input=d, num_filters=960, filter_size=1, stride=1, padding=0, dilation=1)
input=d, num_filters=960, filter_size=1, stride=1, padding=0, dilation=1
)
g = static.nn.conv2d(
input=f, num_filters=160, filter_size=1, stride=1, padding=0, dilation=1)
input=f, num_filters=160, filter_size=1, stride=1, padding=0, dilation=1
)
i = static.nn.conv2d(
input=g, num_filters=960, filter_size=1, stride=1, padding=0, dilation=1)
input=g, num_filters=960, filter_size=1, stride=1, padding=0, dilation=1
)
j1 = paddle.scale(i, scale=2.0, bias=0.5)
j = paddle.scale(j1, scale=2.0, bias=0.5)
temp7 = paddle.nn.functional.relu(j)
......@@ -46,6 +50,7 @@ exe = static.Executor(cpu)
exe.run(static.default_startup_program())
fluid.io.save_inference_model("./resnet_model", [resnet_input.name], [temp7],
exe)
fluid.io.save_inference_model(
"./resnet_model", [resnet_input.name], [temp7], exe
)
print('res', temp7.name)
......@@ -25,38 +25,44 @@ class FusionTest(PassTest):
super(FusionTest, self).__init__(*args, **kwargs)
def init_input_data(self):
"""Set feed data
"""
"""Set feed data"""
self.feed_data = dict()
logger.warn("No Input Data")
def build_program(self, builder, target):
"""
"""
""" """
raise Exception("Not implemented.")
def check_fusion_outputs(self,
group_size,
max_relative_error=1e-5,
all_equal=False,
equal_nan=False):
def check_fusion_outputs(
self,
group_size,
max_relative_error=1e-5,
all_equal=False,
equal_nan=False,
):
base_passes = ["AutoCast", "Decomposer", "TransToCustomCallPass"]
fusion_passes = ["OpFusionPass", "FusionMergePass"]
real_group_size = self.get_pass_size(base_passes + fusion_passes)
logger.debug(
"The model has been fused into {} groups".format(real_group_size))
"The model has been fused into {} groups".format(real_group_size)
)
self.assertEqual(
real_group_size,
group_size,
msg=
"The model should be fused into {} groups, but actually fused {} groups"
.format(group_size, real_group_size))
msg="The model should be fused into {} groups, but actually fused {} groups".format(
group_size, real_group_size
),
)
cinn_no_fusion_outputs = self.get_pass_outputs(base_passes)
cinn_fusion_outputs = self.get_pass_outputs(base_passes +
fusion_passes)
cinn_fusion_outputs = self.get_pass_outputs(base_passes + fusion_passes)
logger.debug("============ Check Outputs ============")
self.check_results(cinn_no_fusion_outputs, cinn_fusion_outputs,
max_relative_error, all_equal, equal_nan)
self.check_results(
cinn_no_fusion_outputs,
cinn_fusion_outputs,
max_relative_error,
all_equal,
equal_nan,
)
......@@ -25,12 +25,15 @@ class TestGroup1(FusionTest):
def build_program(self, builder, target):
eager_in_tmp_8 = builder.create_input(
self.nptype2cinntype(self.feed_data['eager_in_tmp_8'].dtype),
self.feed_data['eager_in_tmp_8'].shape, "eager_in_tmp_8")
self.feed_data['eager_in_tmp_8'].shape,
"eager_in_tmp_8",
)
var_15 = builder.cast(eager_in_tmp_8, dtype="float16")
# cast should not fused into reduce when the output need fetch
var_73 = builder.broadcast_to(
var_15, broadcast_axes=[0, 1, 2, 3], out_shape=[32, 12, 128, 128])
var_15, broadcast_axes=[0, 1, 2, 3], out_shape=[32, 12, 128, 128]
)
var_55 = builder.cast(var_73, dtype="float32")
var_76 = builder.reduce_max(var_55, dim=[3], keep_dim=False)
......@@ -49,12 +52,15 @@ class TestGroup2(FusionTest):
def build_program(self, builder, target):
eager_in_tmp_8 = builder.create_input(
self.nptype2cinntype(self.feed_data['eager_in_tmp_8'].dtype),
self.feed_data['eager_in_tmp_8'].shape, "eager_in_tmp_8")
self.feed_data['eager_in_tmp_8'].shape,
"eager_in_tmp_8",
)
var_15 = builder.cast(eager_in_tmp_8, dtype="float16")
# cast should fused into reduce when the output not fetched
var_73 = builder.broadcast_to(
var_15, broadcast_axes=[0, 1, 2, 3], out_shape=[32, 12, 128, 128])
var_15, broadcast_axes=[0, 1, 2, 3], out_shape=[32, 12, 128, 128]
)
var_55 = builder.cast(var_73, dtype="float32")
var_76 = builder.reduce_max(var_55, dim=[3], keep_dim=False)
......
......@@ -22,9 +22,11 @@ class TestGroup1(FusionTest):
def build_program(self, builder, target):
x = builder.fill_constant(
dtype="float32", shape=[4, 5, 20, 20], value=1.00000000)
dtype="float32", shape=[4, 5, 20, 20], value=1.00000000
)
y = builder.cast(
builder.reduce_sum(x, dim=[2], keep_dim=False), "float16")
builder.reduce_sum(x, dim=[2], keep_dim=False), "float16"
)
feed_list = []
fetch_list = [y]
......
......@@ -27,13 +27,19 @@ class TestGroup1(FusionTest):
def build_program(self, builder, target):
cond = builder.create_input(
self.nptype2cinntype(self.feed_data['cond'].dtype),
self.feed_data['cond'].shape, "cond")
self.feed_data['cond'].shape,
"cond",
)
true_value = builder.create_input(
self.nptype2cinntype(self.feed_data['true_value'].dtype),
self.feed_data['true_value'].shape, "true_value")
self.feed_data['true_value'].shape,
"true_value",
)
false_value = builder.create_input(
self.nptype2cinntype(self.feed_data['false_value'].dtype),
self.feed_data['false_value'].shape, "false_value")
self.feed_data['false_value'].shape,
"false_value",
)
var_1 = builder.select(cond, true_value, false_value)
var_2 = builder.reduce_sum(var_1, dim=[2], keep_dim=False)
......
......@@ -27,6 +27,7 @@ from cinn.common import is_compiled_with_cuda
from cinn.framework import Scope
import sys
sys.path.append("/work/dev_CINN/build/python/tests")
from test.cinn.ops.op_test import OpTest, OpTestTool
......@@ -76,7 +77,7 @@ class OpMapperTest(OpTest):
x1 = paddle.static.data(name='x1', shape=[1, 2], dtype='float32')
x2 = paddle.static.data(name='x2', shape=[1, 2], dtype='float32')
return {'X' : [x1, x2]}
``` """
```"""
return dict()
def set_op_attrs(self) -> dict:
......@@ -136,29 +137,31 @@ class OpMapperTest(OpTest):
def __check_valid(self):
self.assertIsInstance(
self.op_type, str, msg="The op type should be a string")
self.op_type, str, msg="The op type should be a string"
)
self.assertNotEqual(
self.op_type, "", msg="The op type should not empty")
self.op_type, "", msg="The op type should not empty"
)
self.assertIsInstance(
self.inputs,
dict,
msg=
"The set_op_inputs should be return dict(InputName, list(Variable)), where Variable are created by paddle.static.data"
msg="The set_op_inputs should be return dict(InputName, list(Variable)), where Variable are created by paddle.static.data",
)
self.assertIsInstance(
self.attrs,
dict,
msg="The set_op_attrs should be return dict(AttrName, AttrValue)")
msg="The set_op_attrs should be return dict(AttrName, AttrValue)",
)
self.assertIsInstance(
self.output_dtypes,
dict,
msg=
"The set_op_outputs should be return dict(OutName, list(OutDtype)), where OutName and OutDtype are string"
msg="The set_op_outputs should be return dict(OutName, list(OutDtype)), where OutName and OutDtype are string",
)
self.assertGreater(
len(self.output_dtypes),
0,
msg="The set_op_outputs cannot return a empty dict")
msg="The set_op_outputs cannot return a empty dict",
)
for name, var in self.input_arg_map.items():
self.assertIn(name, self.feed_data)
......@@ -166,24 +169,30 @@ class OpMapperTest(OpTest):
var.shape,
self.feed_data[name].shape,
msg="The shape of input {} in feed_data is error".format(
var.name))
var.name
),
)
self.assertEqual(
self.paddleddtype2nptype(var.dtype),
str(self.feed_data[name].dtype),
msg="The dtype of input {} in feed_data is error".format(
var.name))
var.name
),
)
for out_name, in_name in self.inplace_outputs.items():
self.assertNotIn(
out_name,
self.output_dtypes,
msg=
"The {} should not declare twice because it's a inplace output, you should remove it from \"set_op_outputs\""
.format(out_name))
msg="The {} should not declare twice because it's a inplace output, you should remove it from \"set_op_outputs\"".format(
out_name
),
)
self.assertIn(
in_name,
self.inputs,
msg="The inplace var should existed in op' inputs dict")
msg="The inplace var should existed in op' inputs dict",
)
def __get_arguments_map(self, param_maps):
arg_maps = dict()
......@@ -191,18 +200,18 @@ class OpMapperTest(OpTest):
self.assertIsInstance(
args,
list,
msg=
"The type of arguments should be list(Variable), where Variable are created by paddle.static.data"
msg="The type of arguments should be list(Variable), where Variable are created by paddle.static.data",
)
for var in args:
self.assertIsInstance(
var,
PaddleVariable,
msg="The type of argument should be paddle.static.Variable"
msg="The type of argument should be paddle.static.Variable",
)
self.assertTrue(
(var.name not in arg_maps) or (arg_maps[var.name] == var),
msg="Argument %s is duplicated" % var.name)
msg="Argument %s is duplicated" % var.name,
)
arg_maps[var.name] = var
return arg_maps
......@@ -215,9 +224,14 @@ class OpMapperTest(OpTest):
for i in range(len(self.fetch_targets)):
if self.fetch_targets[i].name not in self.skip_check_list:
check_outputs.append(results[i])
logger.debug(msg="{}, shape={}, dtype={}:\n{}".format(
self.fetch_targets[i].name, results[i].shape,
str(results[i].dtype), results[i]))
logger.debug(
msg="{}, shape={}, dtype={}:\n{}".format(
self.fetch_targets[i].name,
results[i].shape,
str(results[i].dtype),
results[i],
)
)
return check_outputs
......@@ -225,8 +239,14 @@ class OpMapperTest(OpTest):
if logger.isEnabledFor(logging.DEBUG):
debug_info = ""
for k, v in info_dict.items():
debug_info += k + ", shape=" + str(v.shape) + ", dtype=" + str(
v.dtype) + ":\n"
debug_info += (
k
+ ", shape="
+ str(v.shape)
+ ", dtype="
+ str(v.dtype)
+ ":\n"
)
debug_info += str(v) + "\n"
logger.debug(title + ":\n" + debug_info)
......@@ -245,8 +265,7 @@ class OpMapperTest(OpTest):
self.assertIsInstance(
dtypes,
list,
msg=
"The set_op_outputs should be return dict(OutName, list(OutDtype)), where OutName and OutDtype are string"
msg="The set_op_outputs should be return dict(OutName, list(OutDtype)), where OutName and OutDtype are string",
)
self.outputs[var_name] = list()
for dtype in dtypes:
......@@ -268,7 +287,8 @@ class OpMapperTest(OpTest):
type=self.op_type,
inputs=self.inputs,
outputs=self.outputs,
attrs=self.attrs).desc
attrs=self.attrs,
).desc
logger.debug("Paddle Program:\n" + str(main_program))
......@@ -279,7 +299,8 @@ class OpMapperTest(OpTest):
main_program,
self.feed_data,
fetch_list=self.fetch_targets,
return_numpy=True)
return_numpy=True,
)
# NOTE: The unittest of `test_reduce_op`, `test_argmax_op`, `test_argmin_op` will
# output 0D-Tensor, hence we need to reshape them into 1D-Tensor temporarily.
......@@ -299,13 +320,15 @@ class OpMapperTest(OpTest):
convertor.create_input(
dtype=self.paddleddtype2nptype(var.dtype),
shape=var.shape,
name=var_name)
name=var_name,
)
convertor.append_op(
type=self.op_type,
inputs=self.op_desc.inputs(),
outputs=self.op_desc.outputs(),
attrs=self.attrs)
attrs=self.attrs,
)
prog = convertor()
......@@ -326,8 +349,10 @@ class OpMapperTest(OpTest):
self.assertIn(
cinn_name,
vars,
msg="Cannot find variable " + cinn_name +
" in cinn program's var list")
msg="Cannot find variable "
+ cinn_name
+ " in cinn program's var list",
)
cinn_inputs.append(vars[cinn_name])
cinn_feed_datas.append(self.feed_data[name])
......@@ -348,7 +373,8 @@ class OpMapperTest(OpTest):
# map the name the variable
self.assertGreater(
len(fetch_names), 0, msg="The program's output cannot be empty!")
len(fetch_names), 0, msg="The program's output cannot be empty!"
)
cinn_output_vars = list()
for name in fetch_names:
cinn_name = convertor.get_cinn_name(name)
......@@ -356,8 +382,10 @@ class OpMapperTest(OpTest):
self.assertIn(
cinn_name,
vars,
msg="Cannot find variable " + cinn_name +
" in cinn program's var list")
msg="Cannot find variable "
+ cinn_name
+ " in cinn program's var list",
)
cinn_output_vars.append(vars[cinn_name])
# run and get result
......@@ -368,7 +396,8 @@ class OpMapperTest(OpTest):
cinn_feed_datas,
cinn_output_vars,
passes=list(),
scope=scope)
scope=scope,
)
logger.debug(msg="CINN result:")
self.cinn_outputs = self.__remove_skip_outputs(results)
......
......@@ -40,7 +40,8 @@ class TestArgmaxOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......@@ -48,7 +49,7 @@ class TestArgmaxOp(OpMapperTest):
"axis": self.axis,
"flatten": self.flatten,
"keepdims": self.keepdims,
"dtype": self.nptype2paddledtype(self.output_dtype)
"dtype": self.nptype2paddledtype(self.output_dtype),
}
def set_op_outputs(self):
......@@ -77,7 +78,7 @@ class TestArgmaxCase1(TestArgmaxOp):
class TestArgmaxCase2(TestArgmaxOp):
"""
Test case with true keepdims
Test case with true keepdims
"""
def init_input_data(self):
......
......@@ -40,7 +40,8 @@ class TestArgminOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......@@ -48,7 +49,7 @@ class TestArgminOp(OpMapperTest):
"axis": self.axis,
"flatten": self.flatten,
"keepdims": self.keepdims,
"dtype": self.nptype2paddledtype(self.output_dtype)
"dtype": self.nptype2paddledtype(self.output_dtype),
}
def set_op_outputs(self):
......@@ -77,7 +78,7 @@ class TestArgminCase1(TestArgminOp):
class TestArgminCase2(TestArgminOp):
"""
Test case with true keepdims
Test case with true keepdims
"""
def init_input_data(self):
......
......@@ -34,7 +34,8 @@ class TestArgSortOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -44,8 +44,7 @@ class TestAssignValueOp(OpMapperTest):
else:
self.assertTrue(
False,
msg=
"The data type of 'input' must be bool, float32, int32 or int64"
msg="The data type of 'input' must be bool, float32, int32 or int64",
)
def set_op_attrs(self):
......@@ -54,7 +53,8 @@ class TestAssignValueOp(OpMapperTest):
value_name, values = self.convert_values(dtype, self.feed_data['x'])
return {
'dtype': self.nptype2paddledtype(
str(dtype)), # should keep the same as input
str(dtype)
), # should keep the same as input
'shape': shape,
value_name: values,
}
......
......@@ -34,11 +34,13 @@ class TestAtan2Op(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
y = paddle.static.data(
name='y',
shape=self.feed_data['y'].shape,
dtype=self.feed_data['y'].dtype)
dtype=self.feed_data['y'].dtype,
)
return {'X1': [x], 'X2': [y]}
def set_op_attrs(self):
......
......@@ -38,29 +38,34 @@ class TestBatchNormOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
scale = paddle.static.data(
name='scale',
shape=self.feed_data['scale'].shape,
dtype=self.feed_data['scale'].dtype)
dtype=self.feed_data['scale'].dtype,
)
bias = paddle.static.data(
name='bias',
shape=self.feed_data['bias'].shape,
dtype=self.feed_data['bias'].dtype)
dtype=self.feed_data['bias'].dtype,
)
mean = paddle.static.data(
name='mean',
shape=self.feed_data['mean'].shape,
dtype=self.feed_data['mean'].dtype)
dtype=self.feed_data['mean'].dtype,
)
variance = paddle.static.data(
name='variance',
shape=self.feed_data['variance'].shape,
dtype=self.feed_data['variance'].dtype)
dtype=self.feed_data['variance'].dtype,
)
return {
'X': [x],
'Scale': [scale],
'Bias': [bias],
'Mean': [mean],
'Variance': [variance]
'Variance': [variance],
}
def set_op_attrs(self):
......@@ -70,7 +75,7 @@ class TestBatchNormOp(OpMapperTest):
'data_layout': 'NCHW',
'is_test': False,
'trainable_statistics': False,
'use_global_stats': False
'use_global_stats': False,
}
def set_op_outputs(self):
......@@ -99,7 +104,7 @@ class TestBatchNormInferOp(TestBatchNormOp):
'data_layout': 'NCHW',
'is_test': True,
'trainable_statistics': False,
'use_global_stats': False
'use_global_stats': False,
}
def skip_check_outputs(self):
......
......@@ -34,11 +34,13 @@ class TestBitwiseOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
y = paddle.static.data(
name='y',
shape=self.feed_data['y'].shape,
dtype=self.feed_data['y'].dtype)
dtype=self.feed_data['y'].dtype,
)
return {'X': [x], 'Y': [y]}
def set_op_attrs(self):
......@@ -74,7 +76,8 @@ class TestBitwiseNotOp(TestBitwiseOp):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def test_check_results(self):
......
......@@ -34,7 +34,8 @@ class TestCholeskyOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -35,7 +35,8 @@ class TestClipOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......@@ -115,7 +116,7 @@ class TestClipOpMaxTensor(TestClipOp):
def init_input_data(self):
self.feed_data = {
'x': self.random([2, 3, 4], "float32", -1.0, 1.0),
'max_input': self.random([1], "float32")
'max_input': self.random([1], "float32"),
}
self.min_val = -random.random()
self.max_val = random.random()
......@@ -124,11 +125,13 @@ class TestClipOpMaxTensor(TestClipOp):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
max_input = paddle.static.data(
name='max_input',
shape=self.feed_data['max_input'].shape,
dtype=self.feed_data['max_input'].dtype)
dtype=self.feed_data['max_input'].dtype,
)
return {'X': [x], 'Max': [max_input]}
......@@ -136,7 +139,7 @@ class TestClipOpMaxTensorInt32(TestClipOpMaxTensor):
def init_input_data(self):
self.feed_data = {
'x': self.random([2, 3, 4], "int32"),
'max_input': self.random([1], "int32")
'max_input': self.random([1], "int32"),
}
self.min_val = -random.random()
self.max_val = random.random()
......@@ -146,7 +149,7 @@ class TestClipOpMaxTensorFloat64(TestClipOpMaxTensor):
def init_input_data(self):
self.feed_data = {
'x': self.random([2, 3, 4], "float64"),
'max_input': self.random([1], "float64")
'max_input': self.random([1], "float64"),
}
self.min_val = -random.random()
self.max_val = random.random()
......@@ -156,7 +159,7 @@ class TestClipOpMaxTensorTypeCast(TestClipOpMaxTensor):
def init_input_data(self):
self.feed_data = {
'x': self.random([2, 3, 4], "float64"),
'max_input': self.random([1], "float32")
'max_input': self.random([1], "float32"),
}
self.min_val = -random.random()
self.max_val = random.random()
......@@ -166,7 +169,7 @@ class TestClipOpMinTensor(TestClipOp):
def init_input_data(self):
self.feed_data = {
'x': self.random([2, 3, 4], "float32"),
'min_input': self.random([1], "float32")
'min_input': self.random([1], "float32"),
}
self.min_val = -random.random()
self.max_val = random.random()
......@@ -175,11 +178,13 @@ class TestClipOpMinTensor(TestClipOp):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
min_input = paddle.static.data(
name='min_input',
shape=self.feed_data['min_input'].shape,
dtype=self.feed_data['min_input'].dtype)
dtype=self.feed_data['min_input'].dtype,
)
return {'X': [x], 'Min': [min_input]}
def set_op_attrs(self):
......@@ -190,7 +195,7 @@ class TestClipOpMinTensorInt32(TestClipOpMinTensor):
def init_input_data(self):
self.feed_data = {
'x': self.random([2, 3, 4], "int32"),
'min_input': self.random([1], "int32")
'min_input': self.random([1], "int32"),
}
self.min_val = -random.random()
self.max_val = random.random()
......@@ -200,7 +205,7 @@ class TestClipOpMinTensorFloat64(TestClipOpMinTensor):
def init_input_data(self):
self.feed_data = {
'x': self.random([2, 3, 4], "float64"),
'min_input': self.random([1], "float64")
'min_input': self.random([1], "float64"),
}
self.min_val = -random.random()
self.max_val = random.random()
......@@ -210,7 +215,7 @@ class TestClipOpMinTensorTypeCast(TestClipOpMinTensor):
def init_input_data(self):
self.feed_data = {
'x': self.random([2, 3, 4], "float64"),
'min_input': self.random([1], "float32")
'min_input': self.random([1], "float32"),
}
self.min_val = -random.random()
self.max_val = random.random()
......
......@@ -34,11 +34,13 @@ class TestCompareOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
y = paddle.static.data(
name='y',
shape=self.feed_data['y'].shape,
dtype=self.feed_data['y'].dtype)
dtype=self.feed_data['y'].dtype,
)
return {'X': [x], 'Y': [y]}
def set_op_attrs(self):
......
......@@ -24,7 +24,7 @@ class TestConv2dOp(OpMapperTest):
def init_input_data(self):
self.feed_data = {
"x": self.random([3, 16, 32, 32], "float32"),
"weight": self.random([16, 16, 3, 3], "float32")
"weight": self.random([16, 16, 3, 3], "float32"),
}
self.data_format = 'NCHW'
......@@ -32,10 +32,14 @@ class TestConv2dOp(OpMapperTest):
return "conv2d"
def set_op_inputs(self):
x = paddle.static.data('x', self.feed_data["x"].shape,
self.feed_data["x"].dtype)
weight = paddle.static.data('weight', self.feed_data["weight"].shape,
self.feed_data["weight"].dtype)
x = paddle.static.data(
'x', self.feed_data["x"].shape, self.feed_data["x"].dtype
)
weight = paddle.static.data(
'weight',
self.feed_data["weight"].shape,
self.feed_data["weight"].dtype,
)
return {'Input': [x], 'Filter': [weight]}
def set_op_attrs(self):
......@@ -46,7 +50,7 @@ class TestConv2dOp(OpMapperTest):
"groups": 1,
"data_format": self.data_format,
"padding_algorithm": "EXPLICIT",
"use_cudnn": True
"use_cudnn": True,
}
def set_op_outputs(self):
......@@ -60,7 +64,7 @@ class TestConv2dNCHWFP16(TestConv2dOp):
def init_input_data(self):
self.feed_data = {
"x": self.random([3, 16, 32, 32], "float16"),
"weight": self.random([16, 16, 3, 3], "float16")
"weight": self.random([16, 16, 3, 3], "float16"),
}
self.data_format = 'NCHW'
......@@ -72,7 +76,7 @@ class TestConv2dNHWC(TestConv2dOp):
def init_input_data(self):
self.feed_data = {
"x": self.random([3, 32, 32, 16], "float32"),
"weight": self.random([16, 16, 3, 3], "float32")
"weight": self.random([16, 16, 3, 3], "float32"),
}
self.data_format = 'NHWC'
......@@ -81,7 +85,7 @@ class TestConv2dNHWCFP16(TestConv2dOp):
def init_input_data(self):
self.feed_data = {
"x": self.random([3, 32, 32, 16], "float16"),
"weight": self.random([16, 16, 3, 3], "float16")
"weight": self.random([16, 16, 3, 3], "float16"),
}
self.data_format = 'NHWC'
......
......@@ -33,7 +33,8 @@ class TestCumsumOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -24,7 +24,7 @@ class TestElementwiseOp(OpMapperTest):
def init_input_data(self):
self.feed_data = {
'x': self.random([32, 64], "float32"),
'y': self.random([32, 64], "float32")
'y': self.random([32, 64], "float32"),
}
def set_op_type(self):
......@@ -34,11 +34,13 @@ class TestElementwiseOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
y = paddle.static.data(
name='y',
shape=self.feed_data['y'].shape,
dtype=self.feed_data['y'].dtype)
dtype=self.feed_data['y'].dtype,
)
return {'X': [x], 'Y': [y]}
def set_op_attrs(self):
......@@ -95,7 +97,7 @@ class TestFloorDivOpCase1(TestElementwiseOp):
def init_input_data(self):
self.feed_data = {
'x': self.random([32, 64], low=1, high=10, dtype='int32'),
'y': self.random([32, 64], low=1, high=10, dtype='int32')
'y': self.random([32, 64], low=1, high=10, dtype='int32'),
}
def set_op_type(self):
......@@ -106,7 +108,7 @@ class TestFloorDivOpCase2(TestElementwiseOp):
def init_input_data(self):
self.feed_data = {
'x': self.random([32], low=1, high=10, dtype='int64'),
'y': self.random([32], low=1, high=10, dtype='int64')
'y': self.random([32], low=1, high=10, dtype='int64'),
}
def set_op_type(self):
......
......@@ -33,7 +33,8 @@ class TestExpandOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -33,7 +33,8 @@ class TestExpandV2Op(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -39,7 +39,7 @@ class TestFillConstantOp(OpMapperTest):
"shape": self.shape,
"value": float(self.value),
"str_value": self.str_value,
"dtype": self.nptype2paddledtype(self.dtype)
"dtype": self.nptype2paddledtype(self.dtype),
}
def set_op_outputs(self):
......@@ -72,7 +72,8 @@ class TestFillConstantByValueTensor(TestFillConstantOp):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {"ValueTensor": [x]}
......
......@@ -30,7 +30,8 @@ class TestFlipOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -24,7 +24,7 @@ class TestGatherNdOp(OpMapperTest):
def init_input_data(self):
self.feed_data = {
'x': self.random([2, 3, 4], 'float32'),
'index': np.array([[1]], dtype='int32')
'index': np.array([[1]], dtype='int32'),
}
def set_op_type(self):
......@@ -34,11 +34,13 @@ class TestGatherNdOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
index = paddle.static.data(
name='index',
shape=self.feed_data['index'].shape,
dtype=self.feed_data['index'].dtype)
dtype=self.feed_data['index'].dtype,
)
return {'X': [x], 'Index': [index]}
def set_op_attrs(self):
......@@ -55,7 +57,7 @@ class TestGatherNdCase1(TestGatherNdOp):
def init_input_data(self):
self.feed_data = {
'x': self.random([2, 3, 4], 'float32'),
'index': np.array([[1, 2, 3]], dtype='int32')
'index': np.array([[1, 2, 3]], dtype='int32'),
}
......
......@@ -24,7 +24,7 @@ class TestGatherOp(OpMapperTest):
def init_input_data(self):
self.feed_data = {
'x': self.random([10, 12, 128, 128], 'float32'),
'index': self.random([5], 'int32', 0, 10)
'index': self.random([5], 'int32', 0, 10),
}
self.axis = 0
......@@ -35,11 +35,13 @@ class TestGatherOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
index = paddle.static.data(
name='index',
shape=self.feed_data['index'].shape,
dtype=self.feed_data['index'].dtype)
dtype=self.feed_data['index'].dtype,
)
return {'X': [x], 'Index': [index]}
def set_op_attrs(self):
......
......@@ -41,7 +41,7 @@ class TestGaussianRandomOp(OpMapperTest):
"std": self.std,
"seed": self.seed,
"shape": self.shape,
"dtype": self.nptype2paddledtype(self.dtype)
"dtype": self.nptype2paddledtype(self.dtype),
}
def set_op_outputs(self):
......
......@@ -37,15 +37,18 @@ class TestLayerNormOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
scale = paddle.static.data(
name='scale',
shape=self.feed_data['scale'].shape,
dtype=self.feed_data['scale'].dtype)
dtype=self.feed_data['scale'].dtype,
)
bias = paddle.static.data(
name='bias',
shape=self.feed_data['bias'].shape,
dtype=self.feed_data['bias'].dtype)
dtype=self.feed_data['bias'].dtype,
)
return {'X': [x], 'Scale': [scale], "Bias": [bias]}
def set_op_attrs(self):
......@@ -55,7 +58,7 @@ class TestLayerNormOp(OpMapperTest):
return {
'Y': [str(self.feed_data['x'].dtype)],
'Mean': [str(self.feed_data['scale'].dtype)],
'Variance': [str(self.feed_data['scale'].dtype)]
'Variance': [str(self.feed_data['scale'].dtype)],
}
def test_check_results(self):
......
......@@ -33,7 +33,8 @@ class TestLog1pOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -34,11 +34,13 @@ class TestLogicalOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
y = paddle.static.data(
name='y',
shape=self.feed_data['y'].shape,
dtype=self.feed_data['y'].dtype)
dtype=self.feed_data['y'].dtype,
)
return {'X': [x], 'Y': [y]}
def set_op_attrs(self):
......@@ -74,7 +76,8 @@ class TestLogicalNotOp(TestLogicalOp):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
......
......@@ -22,7 +22,7 @@ class TestLookupTableOp(OpMapperTest):
def init_input_data(self):
self.feed_data = {
"w": self.random([10, 3], "float32"),
"ids": self.random([5, 1], "int64", 0, 9)
"ids": self.random([5, 1], "int64", 0, 9),
}
def set_op_type(self):
......@@ -32,11 +32,13 @@ class TestLookupTableOp(OpMapperTest):
w = paddle.static.data(
name="w",
shape=self.feed_data["w"].shape,
dtype=self.feed_data["w"].dtype)
dtype=self.feed_data["w"].dtype,
)
ids = paddle.static.data(
name="ids",
shape=self.feed_data["ids"].shape,
dtype=self.feed_data["ids"].dtype)
dtype=self.feed_data["ids"].dtype,
)
return {"W": [w], "Ids": [ids]}
def set_op_attrs(self):
......@@ -53,7 +55,7 @@ class TestLookupTableOpCase1(TestLookupTableOp):
def init_input_data(self):
self.feed_data = {
"w": self.random([32, 64], "float64"),
"ids": self.random([10, 1], "int64", 0, 31)
"ids": self.random([10, 1], "int64", 0, 31),
}
def set_op_attrs(self):
......@@ -64,7 +66,7 @@ class TestLookupTableV2Op(OpMapperTest):
def init_input_data(self):
self.feed_data = {
"w": self.random([10, 3], "float32"),
"ids": self.random([5, 2], "int32", 0, 9)
"ids": self.random([5, 2], "int32", 0, 9),
}
def set_op_type(self):
......@@ -74,11 +76,13 @@ class TestLookupTableV2Op(OpMapperTest):
w = paddle.static.data(
name="w",
shape=self.feed_data["w"].shape,
dtype=self.feed_data["w"].dtype)
dtype=self.feed_data["w"].dtype,
)
ids = paddle.static.data(
name="ids",
shape=self.feed_data["ids"].shape,
dtype=self.feed_data["ids"].dtype)
dtype=self.feed_data["ids"].dtype,
)
return {"W": [w], "Ids": [ids]}
def set_op_attrs(self):
......@@ -95,7 +99,7 @@ class TestLookupTableV2OpCase1(TestLookupTableV2Op):
def init_input_data(self):
self.feed_data = {
"w": self.random([32, 64], "float64"),
"ids": self.random([10, 3], "int64", 0, 31)
"ids": self.random([10, 3], "int64", 0, 31),
}
def set_op_attrs(self):
......
......@@ -36,17 +36,19 @@ class TestMulOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
y = paddle.static.data(
name='y',
shape=self.feed_data['y'].shape,
dtype=self.feed_data['y'].dtype)
dtype=self.feed_data['y'].dtype,
)
return {'X': [x], 'Y': [y]}
def set_op_attrs(self):
return {
"x_num_col_dims": self.x_num_col_dims,
"y_num_col_dims": self.y_num_col_dims
"y_num_col_dims": self.y_num_col_dims,
}
def set_op_outputs(self):
......
......@@ -31,7 +31,8 @@ class TestNormOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......@@ -40,7 +41,7 @@ class TestNormOp(OpMapperTest):
def set_op_outputs(self):
return {
'Out': [str(self.feed_data['x'].dtype)],
"Norm": [str(self.feed_data['x'].dtype)]
"Norm": [str(self.feed_data['x'].dtype)],
}
def test_check_results(self):
......
......@@ -32,14 +32,15 @@ class TestOneHotOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
return {
"depth": self.depth,
"dtype": self.nptype2paddledtype(self.dtype),
"allow_out_of_range": self.allow_out_of_range
"allow_out_of_range": self.allow_out_of_range,
}
def set_op_outputs(self):
......@@ -79,14 +80,15 @@ class TestOneHotV2Op(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
return {
"depth": self.depth,
"dtype": self.nptype2paddledtype(self.dtype),
"allow_out_of_range": self.allow_out_of_range
"allow_out_of_range": self.allow_out_of_range,
}
def set_op_outputs(self):
......
......@@ -18,8 +18,9 @@ from op_mapper_test import OpMapperTest
from cinn.common import *
@unittest.skipIf(not is_compiled_with_cudnn(),
"x86 test will be skipped due to timeout.")
@unittest.skipIf(
not is_compiled_with_cudnn(), "x86 test will be skipped due to timeout."
)
class TestPool2dOp(OpMapperTest):
def init_input_data(self):
self.feed_data = {"x": self.random([2, 3, 7, 7], "float64")}
......@@ -42,7 +43,8 @@ class TestPool2dOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......@@ -57,7 +59,7 @@ class TestPool2dOp(OpMapperTest):
"ceil_mode": self.ceil_mode,
"data_format": self.data_format,
"padding_algorithm": self.padding_algorithm,
"use_cudnn": self.use_cudnn
"use_cudnn": self.use_cudnn,
}
def set_op_outputs(self):
......
......@@ -34,11 +34,13 @@ class TestPowOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
factor = paddle.static.data(
name='factor',
shape=self.feed_data['factor'].shape,
dtype=self.feed_data['factor'].dtype)
dtype=self.feed_data['factor'].dtype,
)
return {'X': [x], 'FactorTensor': [factor]}
def set_op_attrs(self):
......@@ -72,7 +74,8 @@ class TestPowOpInFactorAttr(TestPowOp):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -41,7 +41,7 @@ class TestRandIntOp(OpMapperTest):
"high": self.max,
"seed": self.seed,
"shape": self.shape,
"dtype": self.nptype2paddledtype(self.dtype)
"dtype": self.nptype2paddledtype(self.dtype),
}
def set_op_outputs(self):
......
......@@ -33,7 +33,8 @@ class TestReduceOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......@@ -124,7 +125,7 @@ class TestReduceOutType(TestReduceOp):
return {
"dim": self.dim,
"keep_dim": self.keepdim,
"out_dtype": self.nptype2paddledtype("float64")
"out_dtype": self.nptype2paddledtype("float64"),
}
......@@ -133,7 +134,7 @@ class TestReduceUnkOutType(TestReduceOp):
return {
"dim": self.dim,
"keep_dim": self.keepdim,
"out_dtype": self.nptype2paddledtype("unk")
"out_dtype": self.nptype2paddledtype("unk"),
}
......
......@@ -30,7 +30,8 @@ class TestReverseOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -35,7 +35,8 @@ class TestRollOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -34,14 +34,15 @@ class TestScaleOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
return {
"scale": self.scale,
"bias": self.bias,
"bias_after_scale": self.bias_after_scale
"bias_after_scale": self.bias_after_scale,
}
def set_op_outputs(self):
......@@ -71,7 +72,7 @@ class TestScaleWithScaleTensor(TestScaleOp):
def init_input_data(self):
self.feed_data = {
'x': self.random([32, 64], "float32"),
"scale": self.random([1], "float32", 2.0, 10.0)
"scale": self.random([1], "float32", 2.0, 10.0),
}
self.bias = 2.0
self.bias_after_scale = True
......@@ -80,11 +81,13 @@ class TestScaleWithScaleTensor(TestScaleOp):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
scale = paddle.static.data(
name='scale',
shape=self.feed_data['scale'].shape,
dtype=self.feed_data['scale'].dtype)
dtype=self.feed_data['scale'].dtype,
)
return {'X': [x], "ScaleTensor": [scale]}
def set_op_attrs(self):
......@@ -95,7 +98,7 @@ class TestScaleWithScaleTensorCase1(TestScaleWithScaleTensor):
def init_input_data(self):
self.feed_data = {
'x': self.random([32, 64], "float32"),
"scale": self.random([1], "float32", 2.0, 10.0)
"scale": self.random([1], "float32", 2.0, 10.0),
}
self.bias = 0.0
self.bias_after_scale = True
......@@ -105,7 +108,7 @@ class TestScaleWithScaleTensorCase2(TestScaleWithScaleTensor):
def init_input_data(self):
self.feed_data = {
'x': self.random([32, 64], "int32"),
"scale": self.random([1], "float32", 2.0, 10.0)
"scale": self.random([1], "float32", 2.0, 10.0),
}
self.bias = 0.0
self.bias_after_scale = True
......
......@@ -27,13 +27,10 @@ class TestScatterOp(OpMapperTest):
dim1 = 10
x_data = self.random([dim0, dim1], "float32")
ids_data = np.random.randint(
0, dim0, [random.randint(1, 5)], dtype=np.int32)
0, dim0, [random.randint(1, 5)], dtype=np.int32
)
updates_data = self.random([len(ids_data), dim1], "float32")
self.feed_data = {
'x': x_data,
'ids': ids_data,
'updates': updates_data
}
self.feed_data = {'x': x_data, 'ids': ids_data, 'updates': updates_data}
def set_op_type(self):
return "scatter"
......@@ -42,15 +39,18 @@ class TestScatterOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
ids = paddle.static.data(
name='ids',
shape=self.feed_data['ids'].shape,
dtype=self.feed_data['ids'].dtype)
dtype=self.feed_data['ids'].dtype,
)
updates = paddle.static.data(
name='updates',
shape=self.feed_data['updates'].shape,
dtype=self.feed_data['updates'].dtype)
dtype=self.feed_data['updates'].dtype,
)
return {'X': [x], 'Ids': [ids], 'Updates': [updates]}
def set_op_attrs(self):
......@@ -69,15 +69,12 @@ class TestScatterOpOverWrite(TestScatterOp):
dim1 = 10
x_data = self.random([dim0, dim1], "float32")
ids_data = np.random.randint(
0, dim0, [random.randint(1, 10)], dtype=np.int32)
0, dim0, [random.randint(1, 10)], dtype=np.int32
)
# remove duplicate elements, because paddle has undetermined behavior for duplicate elements
ids_data = np.unique(ids_data)
updates_data = self.random([len(ids_data), dim1], "float32")
self.feed_data = {
'x': x_data,
'ids': ids_data,
'updates': updates_data
}
self.feed_data = {'x': x_data, 'ids': ids_data, 'updates': updates_data}
def set_op_attrs(self):
return {'overwrite': True}
......
......@@ -33,7 +33,8 @@ class TestSignOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -35,7 +35,8 @@ class TestSplitOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -33,7 +33,8 @@ class TestSqueezeOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......@@ -42,7 +43,7 @@ class TestSqueezeOp(OpMapperTest):
def set_op_outputs(self):
return {
'Out': [str(self.feed_data['x'].dtype)],
"XShape": [str(self.feed_data['x'].dtype)]
"XShape": [str(self.feed_data['x'].dtype)],
}
def skip_check_outputs(self):
......
......@@ -35,7 +35,8 @@ class TestStackOp(OpMapperTest):
paddle.static.data(
name=var_name,
shape=self.feed_data[var_name].shape,
dtype=self.feed_data[var_name].dtype)
dtype=self.feed_data[var_name].dtype,
)
for var_name in self.feed_data.keys()
]
return {'X': x}
......
......@@ -38,7 +38,8 @@ class TestStridedSliceOp(OpMapperTest):
inputs = paddle.static.data(
name='inputs',
shape=self.feed_data['inputs'].shape,
dtype=self.feed_data['inputs'].dtype)
dtype=self.feed_data['inputs'].dtype,
)
return {'Input': [inputs]}
def set_op_attrs(self):
......@@ -47,7 +48,7 @@ class TestStridedSliceOp(OpMapperTest):
"starts": self.starts,
"ends": self.ends,
"strides": self.strides,
"infer_flags": self.infer_flags
"infer_flags": self.infer_flags,
}
def set_op_outputs(self):
......
......@@ -43,25 +43,31 @@ class TestTakeAlongAxisOp(OpMapperTest):
def set_op_inputs(self):
broadcast_shape = infer_broadcast_shape(
self.feed_data['x'], self.feed_data['index'], self.axis)
self.feed_data['x'], self.feed_data['index'], self.axis
)
if not broadcast_shape:
broadcast_shape = self.feed_data['index'].shape
self.feed_data['index'] = np.broadcast_to(self.feed_data['index'],
broadcast_shape).copy()
self.feed_data['index'] = np.broadcast_to(
self.feed_data['index'], broadcast_shape
).copy()
broadcast_shape_list = list(broadcast_shape)
broadcast_shape_list[self.axis] = list(
self.feed_data['x'].shape)[self.axis]
broadcast_shape_list[self.axis] = list(self.feed_data['x'].shape)[
self.axis
]
broadcast_shape = tuple(broadcast_shape_list)
self.feed_data['x'] = np.broadcast_to(self.feed_data['x'],
broadcast_shape).copy()
self.feed_data['x'] = np.broadcast_to(
self.feed_data['x'], broadcast_shape
).copy()
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
index = paddle.static.data(
name='index',
shape=self.feed_data['index'].shape,
dtype=self.feed_data['index'].dtype)
dtype=self.feed_data['index'].dtype,
)
return {'Input': [x], 'Index': [index]}
def set_op_attrs(self):
......
......@@ -34,7 +34,8 @@ class TestTileOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -37,7 +37,8 @@ class TestTranspose2Op(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......@@ -46,7 +47,7 @@ class TestTranspose2Op(OpMapperTest):
def set_op_outputs(self):
return {
'Out': [str(self.feed_data['x'].dtype)],
'XShape': [str(self.feed_data['x'].dtype)]
'XShape': [str(self.feed_data['x'].dtype)],
}
def skip_check_outputs(self):
......
......@@ -23,7 +23,7 @@ class TestTriangularSolveOp(OpMapperTest):
def init_input_data(self):
self.feed_data = {
'x': self.random([32, 32], "float32"),
'y': self.random([32, 128], "float32")
'y': self.random([32, 128], "float32"),
}
def set_op_type(self):
......@@ -33,11 +33,13 @@ class TestTriangularSolveOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
y = paddle.static.data(
name='y',
shape=self.feed_data['y'].shape,
dtype=self.feed_data['y'].dtype)
dtype=self.feed_data['y'].dtype,
)
return {'X': [x], 'Y': [y]}
def set_op_attrs(self):
......
......@@ -31,7 +31,8 @@ class TestUnaryOp(OpMapperTest):
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
return {'X': [x]}
def set_op_attrs(self):
......
......@@ -47,7 +47,7 @@ class TestUniformRandomOp(OpMapperTest):
"dtype": self.nptype2paddledtype(self.dtype),
"diag_num": self.diag_num,
"diag_step": self.diag_step,
"diag_val": self.diag_val
"diag_val": self.diag_val,
}
def set_op_outputs(self):
......
......@@ -35,15 +35,18 @@ class TestWhereOp(OpMapperTest):
condition = paddle.static.data(
name='condition',
shape=self.feed_data['condition'].shape,
dtype=self.feed_data['condition'].dtype)
dtype=self.feed_data['condition'].dtype,
)
x = paddle.static.data(
name='x',
shape=self.feed_data['x'].shape,
dtype=self.feed_data['x'].dtype)
dtype=self.feed_data['x'].dtype,
)
y = paddle.static.data(
name='y',
shape=self.feed_data['y'].shape,
dtype=self.feed_data['y'].dtype)
dtype=self.feed_data['y'].dtype,
)
return {'Condition': [condition], 'X': [x], "Y": [y]}
def set_op_attrs(self):
......
......@@ -95,17 +95,13 @@ class OpTest(unittest.TestCase):
def build_cinn_program(self, target):
raise Exception("Not implemented.")
def get_cinn_output(self,
prog,
target,
inputs,
feed_data,
outputs,
passes=[],
scope=None):
def get_cinn_output(
self, prog, target, inputs, feed_data, outputs, passes=[], scope=None
):
fetch_ids = {str(out) for out in outputs}
result = prog.build_and_get_output(
target, inputs, feed_data, outputs, passes=passes, scope=scope)
target, inputs, feed_data, outputs, passes=passes, scope=scope
)
outs_and_grads = []
for res in result:
outs_and_grads.append(res.numpy(target))
......@@ -127,33 +123,49 @@ class OpTest(unittest.TestCase):
logger.debug("============ After Decomposer Pass ============")
print_program(prog)
def check_outputs_and_grads(self,
max_relative_error=1e-5,
max_absolute_error=1e-6,
all_equal=False,
equal_nan=False):
def check_outputs_and_grads(
self,
max_relative_error=1e-5,
max_absolute_error=1e-6,
all_equal=False,
equal_nan=False,
):
self.build_paddle_program(self.target)
self.build_cinn_program(self.target)
logger.debug("============ Check Outputs ============")
self.check_results(self.paddle_outputs, self.cinn_outputs,
max_relative_error, max_absolute_error, all_equal,
equal_nan, "Outputs")
self.check_results(
self.paddle_outputs,
self.cinn_outputs,
max_relative_error,
max_absolute_error,
all_equal,
equal_nan,
"Outputs",
)
if len(self.cinn_grads) != 0:
logger.debug("============ Check Grads ============")
self.check_results(self.paddle_grads, self.cinn_grads,
max_relative_error, max_absolute_error,
all_equal, equal_nan, "Grads")
def check_results(self,
expect_res,
actual_res,
max_relative_error,
max_absolute_error,
all_equal=False,
equal_nan=False,
name="Outputs"):
self.check_results(
self.paddle_grads,
self.cinn_grads,
max_relative_error,
max_absolute_error,
all_equal,
equal_nan,
"Grads",
)
def check_results(
self,
expect_res,
actual_res,
max_relative_error,
max_absolute_error,
all_equal=False,
equal_nan=False,
name="Outputs",
):
def _compute_error_message(output_id, expect, actual):
absolute_diff = np.abs(expect - actual).flatten()
relative_diff = absolute_diff / np.abs(expect).flatten()
......@@ -166,26 +178,40 @@ class OpTest(unittest.TestCase):
max_relative_diff = relative_diff[i]
if absolute_diff[i] > max_absolute_diff:
max_absolute_diff = absolute_diff[i]
if relative_diff[i] > max_relative_error or absolute_diff[
i] > max_absolute_error:
if (
relative_diff[i] > max_relative_error
or absolute_diff[i] > max_absolute_error
):
num_diffs = num_diffs + 1
offset = i if offset == -1 else offset
# The following print can be used to debug.
# print("i=%d, %e vs %e, relative_diff=%e, absolute_diff=%e" % (i, expect.flatten()[i], actual.flatten()[i], relative_diff[i], absolute_diff[i]))
error_message = "[%s] The %d-th output: total %d different results, offset=%d, shape=%s, %e vs %e. Maximum diff of the whole array: maximum_relative_diff=%e, maximum_absolute_diff=%e." % (
self._get_device(), output_id, num_diffs, offset,
str(expect.shape), expect.flatten()[offset],
actual.flatten()[offset], max_relative_diff, max_absolute_diff)
error_message = (
"[%s] The %d-th output: total %d different results, offset=%d, shape=%s, %e vs %e. Maximum diff of the whole array: maximum_relative_diff=%e, maximum_absolute_diff=%e."
% (
self._get_device(),
output_id,
num_diffs,
offset,
str(expect.shape),
expect.flatten()[offset],
actual.flatten()[offset],
max_relative_diff,
max_absolute_diff,
)
)
return error_message
def _check_error_message(output_id, expect, actual):
expect_flatten = expect.flatten()
actual_flatten = actual.flatten()
self.assertEqual(
len(expect_flatten), len(actual_flatten),
"[{}] The {}-th output size different, which expect shape is {} but actual is {}."
.format(self._get_device(), output_id, expect.shape,
actual.shape))
len(expect_flatten),
len(actual_flatten),
"[{}] The {}-th output size different, which expect shape is {} but actual is {}.".format(
self._get_device(), output_id, expect.shape, actual.shape
),
)
num_diffs = 0
offset = -1
for i in range(len(expect_flatten)):
......@@ -194,8 +220,13 @@ class OpTest(unittest.TestCase):
offset = i if offset == -1 else offset
error_message = "[{}] The {}-th output: total {} different results, the first different result's offset={}, where expect value is {} but actual is {}.".format(
self._get_device(), output_id, num_diffs, offset,
expect_flatten[offset], actual_flatten[offset])
self._get_device(),
output_id,
num_diffs,
offset,
expect_flatten[offset],
actual_flatten[offset],
)
return error_message
self.assertEqual(len(expect_res), len(actual_res))
......@@ -218,24 +249,25 @@ class OpTest(unittest.TestCase):
self.assertEqual(
expect.dtype,
actual.dtype,
msg=
"[{}] The {}-th output dtype different, which expect shape is {} but actual is {}."
.format(self._get_device(), i, expect.dtype, actual.dtype))
msg="[{}] The {}-th output dtype different, which expect shape is {} but actual is {}.".format(
self._get_device(), i, expect.dtype, actual.dtype
),
)
# NOTE: Paddle's 0D Tensor will be changed to 1D when calling tensor.numpy(),
# only check non-0D Tensor's shape here. 0D-Tensor's shape will be verified by `test_zero_dim_tensor.py`
if len(expect.shape) != 0 and len(actual.shape) != 0:
self.assertEqual(
expect.shape,
actual.shape,
msg=
"[{}] The {}-th output shape different, which expect shape is {} but actual is {}."
.format(self._get_device(), i, expect.shape, actual.shape))
msg="[{}] The {}-th output shape different, which expect shape is {} but actual is {}.".format(
self._get_device(), i, expect.shape, actual.shape
),
)
should_all_equal = all_equal or (actual.dtype in [
np.dtype('bool'),
np.dtype('int32'),
np.dtype('int64')
])
should_all_equal = all_equal or (
actual.dtype
in [np.dtype('bool'), np.dtype('int32'), np.dtype('int64')]
)
if expect.dtype == np.uint16:
expect_float = convert_uint16_to_float(expect)
......@@ -250,17 +282,24 @@ class OpTest(unittest.TestCase):
actual,
atol=max_absolute_error,
rtol=max_relative_error,
equal_nan=equal_nan)
equal_nan=equal_nan,
)
# _compute_error_message checks which values have absolute or relative error
error_message = "np.allclose(expect, actual, atol={}, rtol={}) checks succeed!".format(
max_absolute_error, max_relative_error
) if is_allclose else _compute_error_message(
i, expect, actual)
error_message = (
"np.allclose(expect, actual, atol={}, rtol={}) checks succeed!".format(
max_absolute_error, max_relative_error
)
if is_allclose
else _compute_error_message(i, expect, actual)
)
else:
is_allclose = np.all(expect == actual)
# _check_error_message checks which values are not equal
error_message = "(expect == actual) checks succeed!" if is_allclose else _check_error_message(
i, expect, actual)
error_message = (
"(expect == actual) checks succeed!"
if is_allclose
else _check_error_message(i, expect, actual)
)
error_message = "[Check " + name + "] " + error_message
......@@ -285,7 +324,7 @@ class OpTest(unittest.TestCase):
# "uint16": UInt(16),
"uint32": UInt(32),
"uint64": UInt(64),
"bool": Bool()
"bool": Bool(),
}
assert str(dtype) in switch_map, str(dtype) + " not support in CINN"
return switch_map[str(dtype)]
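        # Maps numpy dtype names to the CINN type objects consumed by
        # NetBuilder.create_input, e.g. "bool" -> Bool() and, presumably,
        # "float32" -> Float(32) as used throughout the tests below.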
......@@ -302,12 +341,19 @@ class OpTest(unittest.TestCase):
return np.random.uniform(low, high, shape).astype(dtype)
elif dtype == "bfloat16":
return convert_float_to_uint16(
np.random.uniform(low, high, shape).astype("float32"))
np.random.uniform(low, high, shape).astype("float32")
)
elif dtype == "bool":
return np.random.choice(a=[False, True], size=shape).astype(dtype)
elif dtype in [
"uint8", "uint16", "uint32", "uint64", "int8", "int16",
"int32", "int64"
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]:
return np.random.randint(low, high, shape).astype(dtype)
else:
......
......@@ -26,11 +26,12 @@ parser.add_argument(
"--case",
type=str,
help="Which case you want to test, default -1 for all cases.",
default=None)
default=None,
)
args = parser.parse_args()
class TestCaseHelper():
class TestCaseHelper:
"""
Helper class for constructing test cases.
"""
......@@ -69,7 +70,10 @@ class TestCaseHelper():
assert isinstance(self.attrs, list)
self.all_cases = []
all_lists = [
self.inputs, self.dtypes, self.attrs, *self.custom_attrs_list
self.inputs,
self.dtypes,
self.attrs,
*self.custom_attrs_list,
]
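        # Each concrete test case is one combination from the cartesian product of
        # inputs x dtypes x attrs (plus any custom attribute lists); empty lists
        # are filtered out so they do not zero out the product.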
filtered_lists = filter(lambda x: len(x) > 0, all_lists)
for case in itertools.product(*filtered_lists):
......@@ -87,13 +91,21 @@ class TestCaseHelper():
no = int(re.search(r'\d+$', test_name).group(0))
assert 0 <= no and no < len(self.all_cases)
self.all_classes.append(
type(f'{self.__class__.__name__}.{self.class_name}{no}',
(self.cls, ), {"case": self.all_cases[no]}))
type(
f'{self.__class__.__name__}.{self.class_name}{no}',
(self.cls,),
{"case": self.all_cases[no]},
)
)
else:
for i, case in enumerate(self.all_cases):
self.all_classes.append(
type(f'{self.__class__.__name__}.{self.class_name}{i}',
(self.cls, ), {"case": case}))
type(
f'{self.__class__.__name__}.{self.class_name}{i}',
(self.cls,),
{"case": case},
)
)
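        # Each generated class is a dynamic subclass of self.cls with the selected
        # case dict bound to the class attribute `case`, which the test's setUp()
        # and prepare_inputs() read at run time.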
def run(self):
"""
......
......@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAbsOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -34,7 +35,8 @@ class TestAbsOp(OpTest):
shape=self.case["x_shape"],
dtype=self.case["x_dtype"],
low=-100,
high=100)
high=100,
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=True)
......@@ -45,8 +47,10 @@ class TestAbsOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("identity")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
out = builder.abs(x)
prog = builder.build()
......@@ -63,24 +67,34 @@ class TestAbsOpShape(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestAbsOpShape"
self.cls = TestAbsOp
self.inputs = [{
"x_shape": [1],
}, {
"x_shape": [1024],
}, {
"x_shape": [1, 2048],
}, {
"x_shape": [1, 1, 1],
}, {
"x_shape": [32, 64],
}, {
"x_shape": [16, 8, 4, 2],
}, {
"x_shape": [16, 8, 4, 2, 1],
}]
self.dtypes = [{
"x_dtype": "float32",
}]
self.inputs = [
{
"x_shape": [1],
},
{
"x_shape": [1024],
},
{
"x_shape": [1, 2048],
},
{
"x_shape": [1, 1, 1],
},
{
"x_shape": [32, 64],
},
{
"x_shape": [16, 8, 4, 2],
},
{
"x_shape": [16, 8, 4, 2, 1],
},
]
self.dtypes = [
{
"x_dtype": "float32",
}
]
self.attrs = []
......@@ -88,21 +102,26 @@ class TestAbsOpDtype(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestAbsOpDtype"
self.cls = TestAbsOp
self.inputs = [{
"x_shape": [32, 64],
}]
self.dtypes = [{
"x_dtype": "int32",
}, {
"x_dtype": "int64",
}, {
"x_dtype": "float16",
"max_relative_error": 1e-3
}, {
"x_dtype": "float32",
}, {
"x_dtype": "float64",
}]
self.inputs = [
{
"x_shape": [32, 64],
}
]
self.dtypes = [
{
"x_dtype": "int32",
},
{
"x_dtype": "int64",
},
{"x_dtype": "float16", "max_relative_error": 1e-3},
{
"x_dtype": "float32",
},
{
"x_dtype": "float64",
},
]
self.attrs = []
......
......@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAcosOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -34,7 +35,8 @@ class TestAcosOp(OpTest):
shape=self.case["x_shape"],
dtype=self.case["x_dtype"],
low=-1,
high=1)
high=1,
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False)
......@@ -45,8 +47,10 @@ class TestAcosOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("acos")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
out = builder.acos(x)
......@@ -56,8 +60,11 @@ class TestAcosOp(OpTest):
self.cinn_outputs = res
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -66,11 +73,12 @@ class TestAcosCase1(TestCaseHelper):
self.class_name = "TestAcosCase1"
self.cls = TestAcosOp
self.inputs = [{"x_shape": [512, 256]}]
self.dtypes = [{
"x_dtype": "float32"
}, {
"x_dtype": "float64",
}]
self.dtypes = [
{"x_dtype": "float32"},
{
"x_dtype": "float64",
},
]
self.attrs = []
......@@ -78,23 +86,16 @@ class TestAcosCase2(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestAcosCase2"
self.cls = TestAcosOp
self.inputs = [{
"x_shape": [1]
}, {
"x_shape": [1024]
}, {
"x_shape": [512, 256]
}, {
"x_shape": [128, 64, 32]
}, {
"x_shape": [128, 2048, 32]
}, {
"x_shape": [16, 8, 4, 2]
}, {
"x_shape": [1, 1, 1, 1]
}, {
"x_shape": [16, 8, 4, 2, 1]
}]
self.inputs = [
{"x_shape": [1]},
{"x_shape": [1024]},
{"x_shape": [512, 256]},
{"x_shape": [128, 64, 32]},
{"x_shape": [128, 2048, 32]},
{"x_shape": [16, 8, 4, 2]},
{"x_shape": [1, 1, 1, 1]},
{"x_shape": [16, 8, 4, 2, 1]},
]
self.dtypes = [{"x_dtype": "float32"}]
self.attrs = []
......
......@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAcoshOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -34,7 +35,8 @@ class TestAcoshOp(OpTest):
low=2,
high=100,
shape=self.case["x_shape"],
dtype=self.case["x_dtype"])
dtype=self.case["x_dtype"],
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False)
......@@ -45,8 +47,10 @@ class TestAcoshOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("acosh")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
out = builder.acosh(x)
......@@ -56,8 +60,11 @@ class TestAcoshOp(OpTest):
self.cinn_outputs = res
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -66,11 +73,12 @@ class TestAcoshCase1(TestCaseHelper):
self.class_name = "TestAcoshCase1"
self.cls = TestAcoshOp
self.inputs = [{"x_shape": [512, 256]}]
self.dtypes = [{
"x_dtype": "float32"
}, {
"x_dtype": "float64",
}]
self.dtypes = [
{"x_dtype": "float32"},
{
"x_dtype": "float64",
},
]
self.attrs = []
......@@ -78,23 +86,16 @@ class TestAcoshCase2(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestAcoshCase2"
self.cls = TestAcoshOp
self.inputs = [{
"x_shape": [1]
}, {
"x_shape": [1024]
}, {
"x_shape": [512, 256]
}, {
"x_shape": [128, 64, 32]
}, {
"x_shape": [128, 2048, 32]
}, {
"x_shape": [16, 8, 4, 2]
}, {
"x_shape": [1, 1, 1, 1]
}, {
"x_shape": [16, 8, 4, 2, 1]
}]
self.inputs = [
{"x_shape": [1]},
{"x_shape": [1024]},
{"x_shape": [512, 256]},
{"x_shape": [128, 64, 32]},
{"x_shape": [128, 2048, 32]},
{"x_shape": [16, 8, 4, 2]},
{"x_shape": [1, 1, 1, 1]},
{"x_shape": [16, 8, 4, 2, 1]},
]
self.dtypes = [{"x_dtype": "float32"}]
self.attrs = []
......
......@@ -20,8 +20,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestElementwiseAddOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -32,14 +33,17 @@ class TestElementwiseAddOp(OpTest):
shape=self.case["x_shape"],
dtype=self.case["x_dtype"],
low=-10,
high=10)
high=10,
)
self.y_np = self.random(
shape=self.case["y_shape"],
dtype=self.case["y_dtype"],
low=-10,
high=10)
high=10,
)
self.dout_np = self.random(
self.case["dout_shape"], dtype=self.case["dout_dtype"])
self.case["dout_shape"], dtype=self.case["dout_dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False)
......@@ -48,49 +52,69 @@ class TestElementwiseAddOp(OpTest):
def get_unsqueeze_axis(x_rank, y_rank, axis):
self.assertTrue(
x_rank >= y_rank,
"The rank of x should be greater or equal to that of y.")
"The rank of x should be greater or equal to that of y.",
)
axis = axis if axis >= 0 else x_rank - y_rank
unsqueeze_axis = np.arange(0, axis).tolist() + np.arange(
axis + y_rank, x_rank).tolist()
unsqueeze_axis = (
np.arange(0, axis).tolist()
+ np.arange(axis + y_rank, x_rank).tolist()
)
return unsqueeze_axis
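        # Illustrative example: for x.shape = [4, 3, 2], y.shape = [3] and axis = 1,
        # get_unsqueeze_axis returns [0, 2], so y is unsqueezed to [1, 3, 1] and the
        # Paddle reference reproduces the axis-based broadcast of builder.add.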
unsqueeze_axis = get_unsqueeze_axis(
len(x.shape), len(y.shape), self.case["axis"])
y_t = paddle.unsqueeze(
y, axis=unsqueeze_axis) if len(unsqueeze_axis) > 0 else y
len(x.shape), len(y.shape), self.case["axis"]
)
y_t = (
paddle.unsqueeze(y, axis=unsqueeze_axis)
if len(unsqueeze_axis) > 0
else y
)
out = paddle.add(x, y_t)
self.paddle_outputs = [out]
self.paddle_grads = self.get_paddle_grads([out], [x, y],
[self.dout_np])
self.paddle_grads = self.get_paddle_grads([out], [x, y], [self.dout_np])
def build_cinn_program(self, target):
builder = NetBuilder("add")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
y = builder.create_input(
self.nptype2cinntype(self.case["y_dtype"]), self.case["y_shape"],
"y")
self.nptype2cinntype(self.case["y_dtype"]),
self.case["y_shape"],
"y",
)
out = builder.add(x, y, axis=self.case["axis"])
dout = builder.create_input(
self.nptype2cinntype(self.case["dout_dtype"]),
self.case["dout_shape"], "dout")
self.case["dout_shape"],
"dout",
)
x_grad, y_grad = builder.elementwise_add_grad(
dout, x, y, axis=self.case["axis"])
dout, x, y, axis=self.case["axis"]
)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x, y, dout],
[self.x_np, self.y_np, self.dout_np],
[out, x_grad, y_grad])
res = self.get_cinn_output(
prog,
target,
[x, y, dout],
[self.x_np, self.y_np, self.dout_np],
[out, x_grad, y_grad],
)
self.cinn_outputs = [res[0]]
self.cinn_grads = [res[1], res[2]]
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......
......@@ -21,8 +21,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestArangeOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -34,18 +35,26 @@ class TestArangeOp(OpTest):
"start": self.case["start"],
"end": self.case["end"],
"step": self.case["step"],
"dtype": self.case["dtype"]
"dtype": self.case["dtype"],
}
def build_paddle_program(self, target):
out = paddle.arange(self.inputs["start"], self.inputs["end"],
self.inputs["step"], self.inputs["dtype"])
out = paddle.arange(
self.inputs["start"],
self.inputs["end"],
self.inputs["step"],
self.inputs["dtype"],
)
self.paddle_outputs = [out]
def build_cinn_program(self, target):
builder = NetBuilder("arange")
out = builder.arange(self.inputs["start"], self.inputs["end"],
self.inputs["step"], self.inputs["dtype"])
out = builder.arange(
self.inputs["start"],
self.inputs["end"],
self.inputs["step"],
self.inputs["dtype"],
)
prog = builder.build()
res = self.get_cinn_output(prog, target, [], [], [out])
......@@ -141,9 +150,7 @@ class TestArangeOpShapeAndAttr(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float32"
},
{"dtype": "float32"},
]
self.attrs = []
......@@ -170,18 +177,10 @@ class TestArangeOpDtype(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "int32"
},
{
"dtype": "int64"
},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
{"dtype": "int32"},
{"dtype": "int64"},
{"dtype": "float32"},
{"dtype": "float64"},
]
self.attrs = []
......
......@@ -24,18 +24,21 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestArgSortOp(OpTest):
def setUp(self):
self.init_case()
def init_case(self):
self.inputs = {
"x1": np.random.random([
2,
4,
]).astype("float32")
"x1": np.random.random(
[
2,
4,
]
).astype("float32")
}
self.axis = 1
self.descending = False
......@@ -51,8 +54,9 @@ class TestArgSortOp(OpTest):
x1 = builder.create_input(Float(32), self.inputs["x1"].shape, "x1")
out = builder.argsort(x1, self.axis, not self.descending)
prog = builder.build()
forward_res = self.get_cinn_output(prog, target, [x1],
[self.inputs["x1"]], out)
forward_res = self.get_cinn_output(
prog, target, [x1], [self.inputs["x1"]], out
)
self.cinn_outputs = np.array([forward_res[0]]).astype("int64")
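        # builder.argsort takes an ascending flag, hence `not self.descending`; the
        # result is cast to int64, presumably to match the index dtype returned by
        # paddle.argsort.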
......@@ -63,10 +67,12 @@ class TestArgSortOp(OpTest):
class TestArgSortCase1(TestArgSortOp):
def init_case(self):
self.inputs = {
"x1": np.random.random([
2,
4,
]).astype("float32")
"x1": np.random.random(
[
2,
4,
]
).astype("float32")
}
self.axis = 0
self.descending = False
......@@ -75,10 +81,12 @@ class TestArgSortCase1(TestArgSortOp):
class TestArgSortCase2(TestArgSortOp):
def init_case(self):
self.inputs = {
"x1": np.random.random([
2,
4,
]).astype("float32")
"x1": np.random.random(
[
2,
4,
]
).astype("float32")
}
self.axis = 0
self.descending = True
......@@ -87,10 +95,12 @@ class TestArgSortCase2(TestArgSortOp):
class TestArgSortCase3(TestArgSortOp):
def init_case(self):
self.inputs = {
"x1": np.random.random([
2,
4,
]).astype("float32")
"x1": np.random.random(
[
2,
4,
]
).astype("float32")
}
self.axis = 1
self.descending = True
......@@ -99,10 +109,12 @@ class TestArgSortCase3(TestArgSortOp):
class TestArgSortCase4(TestArgSortOp):
def init_case(self):
self.inputs = {
"x1": np.random.random([
2,
4,
]).astype("float32")
"x1": np.random.random(
[
2,
4,
]
).astype("float32")
}
self.axis = -1
self.descending = True
......
......@@ -23,8 +23,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAsinOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -35,7 +36,8 @@ class TestAsinOp(OpTest):
shape=self.case["x_shape"],
dtype=self.case["x_dtype"],
low=-1.0,
high=1.0)
high=1.0,
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=True)
......@@ -45,8 +47,10 @@ class TestAsinOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("unary_elementwise_test")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
out = builder.asin(x)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.x_np], [out])
......@@ -61,24 +65,34 @@ class TestAsinOpShape(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestAsinOpShape"
self.cls = TestAsinOp
self.inputs = [{
"x_shape": [1],
}, {
"x_shape": [1024],
}, {
"x_shape": [1, 2048],
}, {
"x_shape": [1, 1, 1],
}, {
"x_shape": [32, 64],
}, {
"x_shape": [16, 8, 4, 2],
}, {
"x_shape": [16, 8, 4, 2, 1],
}]
self.dtypes = [{
"x_dtype": "float32",
}]
self.inputs = [
{
"x_shape": [1],
},
{
"x_shape": [1024],
},
{
"x_shape": [1, 2048],
},
{
"x_shape": [1, 1, 1],
},
{
"x_shape": [32, 64],
},
{
"x_shape": [16, 8, 4, 2],
},
{
"x_shape": [16, 8, 4, 2, 1],
},
]
self.dtypes = [
{
"x_dtype": "float32",
}
]
self.attrs = []
......@@ -86,14 +100,13 @@ class TestAsinOpDtype(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestAsinOpDtype"
self.cls = TestAsinOp
self.inputs = [{
"x_shape": [32, 64],
}]
self.dtypes = [
self.inputs = [
{
"x_dtype": "float16",
"max_relative_error": 1e-3
},
"x_shape": [32, 64],
}
]
self.dtypes = [
{"x_dtype": "float16", "max_relative_error": 1e-3},
{
"x_dtype": "float32",
},
......
......@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAsinhOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -31,7 +32,8 @@ class TestAsinhOp(OpTest):
def prepare_inputs(self):
self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"])
shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False)
......@@ -42,8 +44,10 @@ class TestAsinhOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("asinh")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
out = builder.asinh(x)
......@@ -53,8 +57,11 @@ class TestAsinhOp(OpTest):
self.cinn_outputs = res
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -63,11 +70,12 @@ class TestAsinhCase1(TestCaseHelper):
self.class_name = "TestAsinhCase1"
self.cls = TestAsinhOp
self.inputs = [{"x_shape": [512, 256]}]
self.dtypes = [{
"x_dtype": "float32"
}, {
"x_dtype": "float64",
}]
self.dtypes = [
{"x_dtype": "float32"},
{
"x_dtype": "float64",
},
]
self.attrs = []
......@@ -75,23 +83,16 @@ class TestAsinhCase2(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestAsinhCase2"
self.cls = TestAsinhOp
self.inputs = [{
"x_shape": [1]
}, {
"x_shape": [1024]
}, {
"x_shape": [512, 256]
}, {
"x_shape": [128, 64, 32]
}, {
"x_shape": [128, 2048, 32]
}, {
"x_shape": [16, 8, 4, 2]
}, {
"x_shape": [1, 1, 1, 1]
}, {
"x_shape": [16, 8, 4, 2, 1]
}]
self.inputs = [
{"x_shape": [1]},
{"x_shape": [1024]},
{"x_shape": [512, 256]},
{"x_shape": [128, 64, 32]},
{"x_shape": [128, 2048, 32]},
{"x_shape": [16, 8, 4, 2]},
{"x_shape": [1, 1, 1, 1]},
{"x_shape": [16, 8, 4, 2, 1]},
]
self.dtypes = [{"x_dtype": "float32"}]
self.attrs = []
......
......@@ -19,8 +19,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAtan2Op(OpTest):
def setUp(self):
# print(f"\n{self.__class__.__name__}: {self.case}")
......@@ -31,12 +32,14 @@ class TestAtan2Op(OpTest):
shape=self.case["x_shape"],
dtype=self.case["x_dtype"],
low=self.case["x_low"],
high=self.case["x_high"])
high=self.case["x_high"],
)
self.y_np = self.random(
shape=self.case["y_shape"],
dtype=self.case["y_dtype"],
low=self.case["y_low"],
high=self.case["y_high"])
high=self.case["y_high"],
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False)
......@@ -47,20 +50,28 @@ class TestAtan2Op(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("atan2")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
y = builder.create_input(
self.nptype2cinntype(self.case["y_dtype"]), self.case["y_shape"],
"y")
self.nptype2cinntype(self.case["y_dtype"]),
self.case["y_shape"],
"y",
)
out = builder.atan2(x, y)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x, y],
[self.x_np, self.y_np], [out])
res = self.get_cinn_output(
prog, target, [x, y], [self.x_np, self.y_np], [out]
)
self.cinn_outputs = res
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -100,12 +111,14 @@ class TestAtan2OpShapes(TestCaseHelper):
"y_dtype": "float32",
},
]
self.attrs = [{
"x_low": -10,
"x_high": 10,
"y_low": -10,
"y_high": 10,
}]
self.attrs = [
{
"x_low": -10,
"x_high": 10,
"y_low": -10,
"y_high": 10,
}
]
class TestAtan2OpDtypes(TestAtan2OpShapes):
......
......@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAtanOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -31,7 +32,8 @@ class TestAtanOp(OpTest):
def prepare_inputs(self):
self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"])
shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False)
......@@ -42,8 +44,10 @@ class TestAtanOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("atan")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
out = builder.atan(x)
......@@ -53,8 +57,11 @@ class TestAtanOp(OpTest):
self.cinn_outputs = res
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -63,11 +70,12 @@ class TestAtanCase1(TestCaseHelper):
self.class_name = "TestAtanCase1"
self.cls = TestAtanOp
self.inputs = [{"x_shape": [512, 256]}]
self.dtypes = [{
"x_dtype": "float32"
}, {
"x_dtype": "float64",
}]
self.dtypes = [
{"x_dtype": "float32"},
{
"x_dtype": "float64",
},
]
self.attrs = []
......@@ -75,23 +83,16 @@ class TestAtanCase2(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestAtanCase2"
self.cls = TestAtanOp
self.inputs = [{
"x_shape": [1]
}, {
"x_shape": [1024]
}, {
"x_shape": [512, 256]
}, {
"x_shape": [128, 64, 32]
}, {
"x_shape": [128, 2048, 32]
}, {
"x_shape": [16, 8, 4, 2]
}, {
"x_shape": [1, 1, 1, 1]
}, {
"x_shape": [16, 8, 4, 2, 1]
}]
self.inputs = [
{"x_shape": [1]},
{"x_shape": [1024]},
{"x_shape": [512, 256]},
{"x_shape": [128, 64, 32]},
{"x_shape": [128, 2048, 32]},
{"x_shape": [16, 8, 4, 2]},
{"x_shape": [1, 1, 1, 1]},
{"x_shape": [16, 8, 4, 2, 1]},
]
self.dtypes = [{"x_dtype": "float32"}]
self.attrs = []
......
......@@ -22,8 +22,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestAtanhOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -31,7 +32,8 @@ class TestAtanhOp(OpTest):
def prepare_inputs(self):
self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"])
shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False)
......@@ -42,8 +44,10 @@ class TestAtanhOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("atanh")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
out = builder.atanh(x)
......@@ -53,8 +57,11 @@ class TestAtanhOp(OpTest):
self.cinn_outputs = res
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -63,11 +70,12 @@ class TestAtanhCase1(TestCaseHelper):
self.class_name = "TestAtanhCase1"
self.cls = TestAtanhOp
self.inputs = [{"x_shape": [512, 256]}]
self.dtypes = [{
"x_dtype": "float32"
}, {
"x_dtype": "float64",
}]
self.dtypes = [
{"x_dtype": "float32"},
{
"x_dtype": "float64",
},
]
self.attrs = []
......@@ -75,23 +83,16 @@ class TestAtanhCase2(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestAtanhCase2"
self.cls = TestAtanhOp
self.inputs = [{
"x_shape": [1]
}, {
"x_shape": [1024]
}, {
"x_shape": [512, 256]
}, {
"x_shape": [128, 64, 32]
}, {
"x_shape": [128, 2048, 32]
}, {
"x_shape": [16, 8, 4, 2]
}, {
"x_shape": [1, 1, 1, 1]
}, {
"x_shape": [16, 8, 4, 2, 1]
}]
self.inputs = [
{"x_shape": [1]},
{"x_shape": [1024]},
{"x_shape": [512, 256]},
{"x_shape": [128, 64, 32]},
{"x_shape": [128, 2048, 32]},
{"x_shape": [16, 8, 4, 2]},
{"x_shape": [1, 1, 1, 1]},
{"x_shape": [16, 8, 4, 2, 1]},
]
self.dtypes = [{"x_dtype": "float32"}]
self.attrs = []
......
......@@ -24,8 +24,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBatchNormTrainOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -33,12 +34,14 @@ class TestBatchNormTrainOp(OpTest):
def prepare_inputs(self):
self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"])
shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np)
batch_norm = paddle.nn.BatchNorm(
self.case["x_shape"][1], act=None, is_test=False)
self.case["x_shape"][1], act=None, is_test=False
)
out = batch_norm(x)
self.paddle_outputs = [out]
......@@ -48,27 +51,37 @@ class TestBatchNormTrainOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("batch_norm")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
scale = builder.fill_constant([self.case["x_shape"][1]], 1.0, 'scale',
'float32')
bias = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'bias',
'float32')
mean = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'mean',
'float32')
variance = builder.fill_constant([self.case["x_shape"][1]], 1.0,
'variance', 'float32')
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
scale = builder.fill_constant(
[self.case["x_shape"][1]], 1.0, 'scale', 'float32'
)
bias = builder.fill_constant(
[self.case["x_shape"][1]], 0.0, 'bias', 'float32'
)
mean = builder.fill_constant(
[self.case["x_shape"][1]], 0.0, 'mean', 'float32'
)
variance = builder.fill_constant(
[self.case["x_shape"][1]], 1.0, 'variance', 'float32'
)
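        # scale=1, bias=0, mean=0, variance=1 presumably mirror paddle.nn.BatchNorm's
        # default parameter and running-statistics initialization, so the CINN result
        # is directly comparable to the Paddle reference above.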
out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False)
prog = builder.build()
forward_res = self.get_cinn_output(
prog, target, [x], [self.x_np], out, passes=[])
prog, target, [x], [self.x_np], out, passes=[]
)
self.cinn_outputs = [forward_res[0]]
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -89,24 +102,16 @@ class TestBatchNormTrainOpAll(TestCaseHelper):
},
]
self.dtypes = [
{
"x_dtype": "float16",
"max_relative_error": 1e-3
},
{
"x_dtype": "float32",
"max_relative_error": 1e-5
},
{
"x_dtype": "bfloat16",
"max_relative_error": 1e-2
},
{"x_dtype": "float16", "max_relative_error": 1e-3},
{"x_dtype": "float32", "max_relative_error": 1e-5},
{"x_dtype": "bfloat16", "max_relative_error": 1e-2},
]
self.attrs = []
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBatchNormBackwardOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -114,14 +119,17 @@ class TestBatchNormBackwardOp(OpTest):
def prepare_inputs(self):
self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"])
shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
self.y_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"])
shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False)
batch_norm = paddle.nn.BatchNorm(
self.case["x_shape"][1], act=None, is_test=False)
self.case["x_shape"][1], act=None, is_test=False
)
out = batch_norm(x)
self.paddle_outputs = [out]
......@@ -132,52 +140,72 @@ class TestBatchNormBackwardOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("batch_norm")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
scale = builder.fill_constant([self.case["x_shape"][1]], 1.0, 'scale',
'float32')
bias = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'bias',
'float32')
mean = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'mean',
'float32')
variance = builder.fill_constant([self.case["x_shape"][1]], 1.0,
'variance', 'float32')
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
scale = builder.fill_constant(
[self.case["x_shape"][1]], 1.0, 'scale', 'float32'
)
bias = builder.fill_constant(
[self.case["x_shape"][1]], 0.0, 'bias', 'float32'
)
mean = builder.fill_constant(
[self.case["x_shape"][1]], 0.0, 'mean', 'float32'
)
variance = builder.fill_constant(
[self.case["x_shape"][1]], 1.0, 'variance', 'float32'
)
out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False)
prog = builder.build()
forward_res = self.get_cinn_output(
prog, target, [x], [self.x_np], out, passes=[])
prog, target, [x], [self.x_np], out, passes=[]
)
self.cinn_outputs = [forward_res[0]]
builder_grad = NetBuilder("batch_norm_grad")
dout = builder_grad.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"dout")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"dout",
)
x_g = builder_grad.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x_g")
scale_g = builder_grad.fill_constant(scale.shape(), 1.0, 'scale_g',
'float32')
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x_g",
)
scale_g = builder_grad.fill_constant(
scale.shape(), 1.0, 'scale_g', 'float32'
)
save_mean = builder_grad.create_input(
self.nptype2cinntype('float32'), out[1].shape(), "save_mean")
self.nptype2cinntype('float32'), out[1].shape(), "save_mean"
)
save_variance = builder_grad.create_input(
self.nptype2cinntype('float32'), out[2].shape(), "save_variance")
self.nptype2cinntype('float32'), out[2].shape(), "save_variance"
)
out_grad = builder_grad.batch_norm_grad(dout, x_g, scale_g, save_mean,
save_variance)
out_grad = builder_grad.batch_norm_grad(
dout, x_g, scale_g, save_mean, save_variance
)
prog = builder_grad.build()
backward_res = self.get_cinn_output(
prog,
target, [dout, x_g, save_mean, save_variance],
target,
[dout, x_g, save_mean, save_variance],
[self.y_np, self.x_np, forward_res[1], forward_res[2]],
out_grad,
passes=[])
passes=[],
)
self.cinn_grads = [backward_res[0]]
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -198,20 +226,15 @@ class TestBatchNormBackwardOpAll(TestCaseHelper):
},
]
self.dtypes = [
{
"x_dtype": "float16",
"max_relative_error": 1e-3
},
{
"x_dtype": "float32",
"max_relative_error": 1e-5
},
{"x_dtype": "float16", "max_relative_error": 1e-3},
{"x_dtype": "float32", "max_relative_error": 1e-5},
]
self.attrs = []
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBatchNormInferOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -219,12 +242,14 @@ class TestBatchNormInferOp(OpTest):
def prepare_inputs(self):
self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"])
shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np)
batch_norm = paddle.nn.BatchNorm(
self.case["x_shape"][1], act=None, is_test=True)
self.case["x_shape"][1], act=None, is_test=True
)
out = batch_norm(x)
self.paddle_outputs = [out]
......@@ -234,22 +259,29 @@ class TestBatchNormInferOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("batch_norm")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
scale = builder.fill_constant([self.case["x_shape"][1]], 1.0, 'scale',
'float32')
bias = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'bias',
'float32')
mean = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'mean',
'float32')
variance = builder.fill_constant([self.case["x_shape"][1]], 1.0,
'variance', 'float32')
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
scale = builder.fill_constant(
[self.case["x_shape"][1]], 1.0, 'scale', 'float32'
)
bias = builder.fill_constant(
[self.case["x_shape"][1]], 0.0, 'bias', 'float32'
)
mean = builder.fill_constant(
[self.case["x_shape"][1]], 0.0, 'mean', 'float32'
)
variance = builder.fill_constant(
[self.case["x_shape"][1]], 1.0, 'variance', 'float32'
)
out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False)
prog = builder.build()
forward_res = self.get_cinn_output(
prog, target, [x], [self.x_np], out, passes=[])
prog, target, [x], [self.x_np], out, passes=[]
)
self.cinn_outputs = [forward_res[0]]
def test_check_results(self):
......@@ -273,10 +305,7 @@ class TestBatchNormInferOpAll(TestCaseHelper):
},
]
self.dtypes = [
{
"x_dtype": "float32",
"max_relative_error": 1e-5
},
{"x_dtype": "float32", "max_relative_error": 1e-5},
]
self.attrs = []
......
......@@ -23,8 +23,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBinaryOp(OpTest):
def setUp(self):
self.init_case()
......@@ -55,18 +56,24 @@ class TestBinaryOp(OpTest):
def get_unsqueeze_axis(x_rank, y_rank, axis):
self.assertTrue(
x_rank >= y_rank,
"The rank of x should be greater or equal to that of y.")
"The rank of x should be greater or equal to that of y.",
)
axis = axis if axis >= 0 else x_rank - y_rank
unsqueeze_axis = np.arange(0, axis).tolist() + np.arange(
axis + y_rank, x_rank).tolist()
unsqueeze_axis = (
np.arange(0, axis).tolist()
+ np.arange(axis + y_rank, x_rank).tolist()
)
return unsqueeze_axis
unsqueeze_axis = get_unsqueeze_axis(
len(self.inputs["x"].shape), len(self.inputs["y"].shape),
self.axis)
y_t = paddle.unsqueeze(
y, axis=unsqueeze_axis) if len(unsqueeze_axis) > 0 else y
len(self.inputs["x"].shape), len(self.inputs["y"].shape), self.axis
)
y_t = (
paddle.unsqueeze(y, axis=unsqueeze_axis)
if len(unsqueeze_axis) > 0
else y
)
out = self.paddle_func(x, y_t)
self.paddle_outputs = [out]
......@@ -75,15 +82,20 @@ class TestBinaryOp(OpTest):
builder = NetBuilder("binary_elementwise_test")
x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x")
self.inputs["x"].shape,
"x",
)
y = builder.create_input(
self.nptype2cinntype(self.inputs["y"].dtype),
self.inputs["y"].shape, "y")
self.inputs["y"].shape,
"y",
)
out = self.cinn_func(builder, x, y, axis=self.axis)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x, y],
[self.inputs["x"], self.inputs["y"]], [out])
res = self.get_cinn_output(
prog, target, [x, y], [self.inputs["x"], self.inputs["y"]], [out]
)
self.cinn_outputs = res
......@@ -158,13 +170,17 @@ class TestMultiplyOp(TestBinaryOp):
class TestFloorDivideOp(TestBinaryOp):
def get_x_data(self):
# avoid random generate 0
return self.random([32, 64], 'int32', 1, 100) * np.random.choice(
[-1, 1], [1])[0]
return (
self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
def get_y_data(self):
# avoid random generate 0
return self.random([32, 64], 'int32', 1, 100) * np.random.choice(
[-1, 1], [1])[0]
return (
self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
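    # Values are drawn from [1, 100) and the whole tensor is multiplied by a single
    # random +/-1, so zero divisors are avoided while both operand signs of
    # floor_divide are still exercised.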
def paddle_func(self, x, y):
return paddle.floor_divide(x, y)
......@@ -183,12 +199,16 @@ class TestModOp(TestBinaryOp):
class TestModCase1(TestModOp):
def get_x_data(self):
return self.random([32, 64], 'int32', 1, 100) * np.random.choice(
[-1, 1], [1])[0]
return (
self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
def get_y_data(self):
return self.random([32, 64], 'int32', 1, 100) * np.random.choice(
[-1, 1], [1])[0]
return (
self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
class TestRemainderOp(TestBinaryOp):
......@@ -202,12 +222,16 @@ class TestRemainderOp(TestBinaryOp):
class TestRemainderCase1(TestRemainderOp):
def get_x_data(self):
return self.random([32, 64], 'int32', 1, 100) * np.random.choice(
[-1, 1], [1])[0]
return (
self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
def get_y_data(self):
return self.random([32, 64], 'int32', 1, 100) * np.random.choice(
[-1, 1], [1])[0]
return (
self.random([32, 64], 'int32', 1, 100)
* np.random.choice([-1, 1], [1])[0]
)
class TestMaxOp(TestBinaryOp):
......
......@@ -23,8 +23,9 @@ from cinn.common import *
from struct import pack, unpack
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBitcastConvertOp(OpTest):
def setUp(self):
self.init_case()
......@@ -35,9 +36,10 @@ class TestBitcastConvertOp(OpTest):
packed = pack(data.size * 'i', *data.flatten())
self.inputs = {"x": data}
self.outputs = {
"y": np.array(unpack('12B', packed), dtype='uint8').reshape((3, 1,
4)),
"output_type": "uint8"
"y": np.array(unpack('12B', packed), dtype='uint8').reshape(
(3, 1, 4)
),
"output_type": "uint8",
}
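        # struct.pack serializes the int32 array to raw bytes and unpack reinterprets
        # those bytes as uint8, i.e. the expected result of a bitcast: 3 int32 values
        # (12 bytes) become 12 uint8 values reshaped to (3, 1, 4).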
def build_paddle_program(self, target):
......@@ -48,11 +50,12 @@ class TestBitcastConvertOp(OpTest):
builder = NetBuilder("bitcast_convert")
x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x")
self.inputs["x"].shape,
"x",
)
out = builder.bitcast_convert(x, self.outputs["output_type"])
prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]],
[out])
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
self.cinn_outputs = [res[0]]
def test_check_results(self):
......@@ -67,7 +70,7 @@ class TestBitcastConvertCase1(TestBitcastConvertOp):
self.inputs = {"x": data}
self.outputs = {
"y": np.array(unpack('4i', packed), dtype='int32').reshape((4)),
"output_type": "int32"
"output_type": "int32",
}
......@@ -78,9 +81,10 @@ class TestBitcastConvertCase2(TestBitcastConvertOp):
packed = pack(data.size * 'f', *data.flatten())
self.inputs = {"x": data}
self.outputs = {
"y": np.array(unpack('12d', packed), dtype='float64').reshape((4,
3)),
"output_type": "float64"
"y": np.array(unpack('12d', packed), dtype='float64').reshape(
(4, 3)
),
"output_type": "float64",
}
......@@ -91,11 +95,10 @@ class TestBitcastConvertCase3(TestBitcastConvertOp):
packed = pack(data.size * 'f', *data.flatten())
self.inputs = {"x": data}
self.outputs = {
"y":
np.array(unpack('48H', packed), dtype='uint16').reshape((4, 3, 2,
2)),
"output_type":
"uint16"
"y": np.array(unpack('48H', packed), dtype='uint16').reshape(
(4, 3, 2, 2)
),
"output_type": "uint16",
}
......
......@@ -20,8 +20,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBitwiseOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -33,19 +34,23 @@ class TestBitwiseOp(OpTest):
self.x_np = np.full(
shape=self.case["x_shape"],
fill_value=np.inf,
dtype=self.case["dtype"])
dtype=self.case["dtype"],
)
# Test with nan values
elif "with_nan" in self.case:
self.x_np = np.full(
shape=self.case["x_shape"],
fill_value=np.nan,
dtype=self.case["dtype"])
dtype=self.case["dtype"],
)
else:
self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["dtype"])
shape=self.case["x_shape"], dtype=self.case["dtype"]
)
if self.case["op_type"] != "not":
self.y_np = self.random(
shape=self.case["y_shape"], dtype=self.case["dtype"])
shape=self.case["y_shape"], dtype=self.case["dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=False)
......@@ -66,12 +71,14 @@ class TestBitwiseOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("bitwise")
x = builder.create_input(
self.nptype2cinntype(self.case["dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["dtype"]), self.case["x_shape"], "x"
)
if self.case["op_type"] != "not":
y = builder.create_input(
self.nptype2cinntype(self.case["dtype"]), self.case["y_shape"],
"y")
self.nptype2cinntype(self.case["dtype"]),
self.case["y_shape"],
"y",
)
if self.case["op_type"] == "and":
out = builder.bitwise_and(x, y)
elif self.case["op_type"] == "or":
......@@ -84,15 +91,19 @@ class TestBitwiseOp(OpTest):
out = builder.identity(x)
prog = builder.build()
if self.case["op_type"] != "not":
res = self.get_cinn_output(prog, target, [x, y],
[self.x_np, self.y_np], [out])
res = self.get_cinn_output(
prog, target, [x, y], [self.x_np, self.y_np], [out]
)
else:
res = self.get_cinn_output(prog, target, [x], [self.x_np], [out])
self.cinn_outputs = res
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -131,23 +142,13 @@ class TestBitwiseOpShape(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "int32"
},
{"dtype": "int32"},
]
self.attrs = [
{
"op_type": "and"
},
{
"op_type": "or"
},
{
"op_type": "xor"
},
{
"op_type": "not"
},
{"op_type": "and"},
{"op_type": "or"},
{"op_type": "xor"},
{"op_type": "not"},
]
......@@ -162,38 +163,18 @@ class TestBitwiseOpDtype(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "bool"
},
{
"dtype": "uint8"
},
{
"dtype": "int8"
},
{
"dtype": "int16"
},
{
"dtype": "int32"
},
{
"dtype": "int64"
},
{"dtype": "bool"},
{"dtype": "uint8"},
{"dtype": "int8"},
{"dtype": "int16"},
{"dtype": "int32"},
{"dtype": "int64"},
]
self.attrs = [
{
"op_type": "and"
},
{
"op_type": "or"
},
{
"op_type": "xor"
},
{
"op_type": "not"
},
{"op_type": "and"},
{"op_type": "or"},
{"op_type": "xor"},
{"op_type": "not"},
]
......
......@@ -45,11 +45,11 @@ class TestBroadcastToOp(OpTest):
builder = NetBuilder("BroadcastTo")
x = builder.create_input(Float(32), self.inputs["x"].shape, "x")
out = builder.broadcast_to(
x, out_shape=self.out_shape, broadcast_axes=self.broadcast_axes)
x, out_shape=self.out_shape, broadcast_axes=self.broadcast_axes
)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]],
[out])
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
self.cinn_outputs = res
......@@ -119,8 +119,7 @@ class TestBroadcastToOpNoAxes(OpTest):
out = builder.broadcast_to(x, out_shape=self.out_shape)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]],
[out])
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
self.cinn_outputs = res
......
......@@ -21,8 +21,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestBroadcastToOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -30,7 +31,8 @@ class TestBroadcastToOp(OpTest):
def prepare_inputs(self):
self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"])
shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=True)
......@@ -41,12 +43,15 @@ class TestBroadcastToOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("BroadcastTo")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
out = builder.broadcast_to(
x,
out_shape=self.case["d_shape"],
broadcast_axes=self.case["broadcast_axes"])
broadcast_axes=self.case["broadcast_axes"],
)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.x_np], [out])
......@@ -54,8 +59,11 @@ class TestBroadcastToOp(OpTest):
self.cinn_outputs = [res[0]]
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -113,9 +121,9 @@ class TestBroadcastToAllTwo(TestCaseHelper):
{
"x_dtype": "bool",
},
#{
# {
# "x_dtype": "int8",
#},
# },
{
"x_dtype": "int32",
},
......@@ -142,7 +150,8 @@ class TestBroadcastToOpNoAxes(OpTest):
def prepare_inputs(self):
self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"])
shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=True)
......@@ -153,8 +162,10 @@ class TestBroadcastToOpNoAxes(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("BroadcastTo")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
out = builder.broadcast_to(x, out_shape=self.case["d_shape"])
prog = builder.build()
......@@ -163,8 +174,11 @@ class TestBroadcastToOpNoAxes(OpTest):
self.cinn_outputs = [res[0]]
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -205,10 +219,10 @@ class TestBroadcastToOpNoAxesAllOne(TestCaseHelper):
"x_shape": [64, 32, 16, 8],
"d_shape": [128, 64, 32, 16, 8],
},
#{
# {
# "x_shape": [128, 64, 32, 16, 8],
# "d_shape": [256, 128, 64, 32, 16, 8],
#},
# },
]
self.dtypes = [
{
......
......@@ -24,8 +24,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestCastOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -33,7 +34,8 @@ class TestCastOp(OpTest):
def prepare_inputs(self):
self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"])
shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=True)
......@@ -46,8 +48,10 @@ class TestCastOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("cast")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
out = builder.cast(x, self.case["d_dtype"])
prog = builder.build()
......@@ -56,8 +60,11 @@ class TestCastOp(OpTest):
self.cinn_outputs = [res[0]]
def test_check_results(self):
max_relative_error = self.case[
"max_relative_error"] if "max_relative_error" in self.case else 1e-5
max_relative_error = (
self.case["max_relative_error"]
if "max_relative_error" in self.case
else 1e-5
)
self.check_outputs_and_grads(max_relative_error=max_relative_error)
......@@ -79,21 +86,27 @@ class TestCastShape(TestCaseHelper):
"x_shape": [16, 8, 4, 2],
},
]
self.dtypes = [{
"x_dtype": "float32",
}]
self.attrs = [{
"d_dtype": "float64",
}]
self.dtypes = [
{
"x_dtype": "float32",
}
]
self.attrs = [
{
"d_dtype": "float64",
}
]
class TestCastDtype(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestCastOpCase"
self.cls = TestCastOp
self.inputs = [{
"x_shape": [32, 64],
}]
self.inputs = [
{
"x_shape": [32, 64],
}
]
self.dtypes = [
{
"x_dtype": "bool",
......@@ -101,19 +114,12 @@ class TestCastDtype(TestCaseHelper):
{
"x_dtype": "int8",
},
{
"x_dtype": "int16"
},
{"x_dtype": "int16"},
{
"x_dtype": "int32",
},
{
"x_dtype": "int64"
},
{
"x_dtype": "float16",
"max_relative_error": 1e-3
},
{"x_dtype": "int64"},
{"x_dtype": "float16", "max_relative_error": 1e-3},
{
"x_dtype": "float32",
},
......@@ -128,19 +134,12 @@ class TestCastDtype(TestCaseHelper):
{
"d_dtype": "int8",
},
{
"d_dtype": "int16"
},
{"d_dtype": "int16"},
{
"d_dtype": "int32",
},
{
"d_dtype": "int64"
},
{
"d_dtype": "float16",
"max_relative_error": 1e-3
},
{"d_dtype": "int64"},
{"d_dtype": "float16", "max_relative_error": 1e-3},
{
"d_dtype": "float32",
},
......
......@@ -22,8 +22,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestCbrtOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -32,8 +33,9 @@ class TestCbrtOp(OpTest):
def prepare_inputs(self):
self.inputs = {
"x":
self.random(self.case["shape"], self.case["dtype"], -100.0, 100.0),
"x": self.random(
self.case["shape"], self.case["dtype"], -100.0, 100.0
),
}
def build_paddle_program(self, target):
......@@ -45,18 +47,20 @@ class TestCbrtOp(OpTest):
builder = NetBuilder("cbrt")
x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x")
self.inputs["x"].shape,
"x",
)
out = builder.cbrt(x)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]],
[out])
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
self.cinn_outputs = res
def test_check_results(self):
self.check_outputs_and_grads(
max_relative_error=1e-3, max_absolute_error=1e-3)
max_relative_error=1e-3, max_absolute_error=1e-3
)
class TestCbrtOpShape(TestCaseHelper):
......@@ -102,9 +106,7 @@ class TestCbrtOpShape(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float32"
},
{"dtype": "float32"},
]
self.attrs = []
......@@ -125,15 +127,9 @@ class TestCbrtOpDtype(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float16"
},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
{"dtype": "float16"},
{"dtype": "float32"},
{"dtype": "float64"},
]
self.attrs = []
......
......@@ -21,8 +21,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestCeilOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -31,8 +32,9 @@ class TestCeilOp(OpTest):
def prepare_inputs(self):
self.inputs = {
"x":
self.random(self.case["shape"], self.case["dtype"], -100.0, 100.0),
"x": self.random(
self.case["shape"], self.case["dtype"], -100.0, 100.0
),
}
def build_paddle_program(self, target):
......@@ -47,12 +49,13 @@ class TestCeilOp(OpTest):
builder = NetBuilder("ceil")
x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x")
self.inputs["x"].shape,
"x",
)
out = builder.ceil(x)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]],
[out])
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
self.cinn_outputs = res
......@@ -103,9 +106,7 @@ class TestCeilOpShape(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float32"
},
{"dtype": "float32"},
]
self.attrs = []
......@@ -126,15 +127,9 @@ class TestCeilOpDtype(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float16"
},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
{"dtype": "float16"},
{"dtype": "float32"},
{"dtype": "float64"},
]
self.attrs = []
......
......@@ -22,8 +22,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestCholeskyOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -34,14 +35,16 @@ class TestCholeskyOp(OpTest):
if "batch_dim" in self.case and self.case["batch_dim"] > 0:
x = []
for _ in range(self.case["batch_dim"]):
matrix = self.random(self.case["shape"], self.case["dtype"],
-1.0, 1.0)
matrix = self.random(
self.case["shape"], self.case["dtype"], -1.0, 1.0
)
matrix_t = np.transpose(matrix, [1, 0])
x.append(np.dot(matrix, matrix_t))
x = np.stack(x)
else:
matrix = self.random(self.case["shape"], self.case["dtype"], -1.0,
1.0)
matrix = self.random(
self.case["shape"], self.case["dtype"], -1.0, 1.0
)
matrix_t = np.transpose(matrix, [1, 0])
x = np.dot(matrix, matrix_t)
self.inputs = {"x": x}
......@@ -56,11 +59,14 @@ class TestCholeskyOp(OpTest):
builder = NetBuilder("cholesky")
x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x")
self.inputs["x"].shape,
"x",
)
out = builder.cholesky(x, self.upper)
prog = builder.build()
res = self.get_cinn_output(
prog, target, [x], [self.inputs["x"]], [out], passes=[])
prog, target, [x], [self.inputs["x"]], [out], passes=[]
)
self.cinn_outputs = [res[0]]
def test_check_results(self):
......@@ -83,14 +89,10 @@ class TestCholeskyOpShape(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float32"
},
{"dtype": "float32"},
]
self.attrs = [
{
"upper": False
},
{"upper": False},
]
......@@ -107,23 +109,12 @@ class TestCholeskyOpLargeShape(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float64"
},
{"dtype": "float64"},
]
self.attrs = [
{
"upper": False,
"batch_dim": 2
},
{
"upper": False,
"batch_dim": 4
},
{
"upper": True,
"batch_dim": 8
},
{"upper": False, "batch_dim": 2},
{"upper": False, "batch_dim": 4},
{"upper": True, "batch_dim": 8},
]
......@@ -143,17 +134,11 @@ class TestCholeskyOpDtype(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float32"
},
{
"dtype": "float64"
},
{"dtype": "float32"},
{"dtype": "float64"},
]
self.attrs = [
{
"upper": False
},
{"upper": False},
]
......@@ -173,23 +158,12 @@ class TestCholeskyOpBatch(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float32"
},
{"dtype": "float32"},
]
self.attrs = [
{
"upper": False,
"batch_dim": 1
},
{
"upper": False,
"batch_dim": 4
},
{
"upper": False,
"batch_dim": 8
},
{"upper": False, "batch_dim": 1},
{"upper": False, "batch_dim": 4},
{"upper": False, "batch_dim": 8},
]
......@@ -209,12 +183,8 @@ class TestCholeskyOpAttrs(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float32"
},
{
"dtype": "float64"
},
{"dtype": "float32"},
{"dtype": "float64"},
]
self.attrs = [
{
......
......@@ -37,7 +37,7 @@ def count_leading_zeros(integer, dtype):
if integer < 0:
return 0
mask = 1 << (bits - 1)
integer &= (mask - 1)
integer &= mask - 1
clz = 0
while mask > 0 and integer & mask == 0:
clz += 1
......@@ -45,8 +45,9 @@ def count_leading_zeros(integer, dtype):
return clz
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestClzOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -59,8 +60,11 @@ class TestClzOp(OpTest):
high = INT32_MAX if dtype == "int32" else INT64_MAX
x = self.random(self.case["shape"], dtype, low=low, high=high)
y = list(
map(lambda num: count_leading_zeros(num, dtype),
x.reshape(-1).tolist()))
map(
lambda num: count_leading_zeros(num, dtype),
x.reshape(-1).tolist(),
)
)
self.inputs = {"x": x}
self.outputs = {"y": np.array(y).reshape(x.shape).astype(dtype)}
......@@ -72,11 +76,12 @@ class TestClzOp(OpTest):
builder = NetBuilder("clz")
x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x")
self.inputs["x"].shape,
"x",
)
out = builder.clz(x)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]],
[out])
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
self.cinn_outputs = res
def test_check_results(self):
......@@ -126,12 +131,8 @@ class TestClzOpShapeDtype(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "int32"
},
{
"dtype": "int64"
},
{"dtype": "int32"},
{"dtype": "int64"},
]
self.attrs = []
......
......@@ -19,8 +19,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestComparisonOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -31,12 +32,12 @@ class TestComparisonOp(OpTest):
if self.case["broadcast"]:
self.inputs = {
"x": self.random(self.case["x_shape"], self.case["dtype"]),
"y": self.random(self.case["y_shape"], self.case["dtype"])
"y": self.random(self.case["y_shape"], self.case["dtype"]),
}
else:
self.inputs = {
"x": self.random(self.case["shape"], self.case["dtype"]),
"y": self.random(self.case["shape"], self.case["dtype"])
"y": self.random(self.case["shape"], self.case["dtype"]),
}
self.operation = self.case["operation"]
......@@ -63,10 +64,14 @@ class TestComparisonOp(OpTest):
builder = NetBuilder("select")
x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x")
self.inputs["x"].shape,
"x",
)
y = builder.create_input(
self.nptype2cinntype(self.inputs["y"].dtype),
self.inputs["y"].shape, "y")
self.inputs["y"].shape,
"y",
)
if self.operation == "equal":
out = builder.equal(x, y)
......@@ -83,8 +88,9 @@ class TestComparisonOp(OpTest):
else:
raise NotImplementedError
prog = builder.build()
res = self.get_cinn_output(prog, target, [x, y],
[self.inputs["x"], self.inputs["y"]], [out])
res = self.get_cinn_output(
prog, target, [x, y], [self.inputs["x"], self.inputs["y"]], [out]
)
self.cinn_outputs = res
def test_check_results(self):
......@@ -144,43 +150,21 @@ class TestComparisonOpShape(TestCaseHelper):
{
"shape": [131072],
},
{
"shape": [1048576]
},
{"shape": [1048576]},
{
"shape": [64, 32, 16, 8, 4],
},
]
self.dtypes = [
{
"dtype": "float32"
},
{"dtype": "float32"},
]
self.attrs = [
{
"operation": "equal",
"broadcast": False
},
{
"operation": "not_equal",
"broadcast": False
},
{
"operation": "greater_than",
"broadcast": False
},
{
"operation": "less_than",
"broadcast": False
},
{
"operation": "greater_equal",
"broadcast": False
},
{
"operation": "less_equal",
"broadcast": False
},
{"operation": "equal", "broadcast": False},
{"operation": "not_equal", "broadcast": False},
{"operation": "greater_than", "broadcast": False},
{"operation": "less_than", "broadcast": False},
{"operation": "greater_equal", "broadcast": False},
{"operation": "less_equal", "broadcast": False},
]
......@@ -197,50 +181,20 @@ class TestComparisonOpDtype(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float16"
},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
{
"dtype": "bool"
},
{
"dtype": "int32"
},
{
"dtype": "int64"
},
{"dtype": "float16"},
{"dtype": "float32"},
{"dtype": "float64"},
{"dtype": "bool"},
{"dtype": "int32"},
{"dtype": "int64"},
]
self.attrs = [
{
"operation": "equal",
"broadcast": False
},
{
"operation": "not_equal",
"broadcast": False
},
{
"operation": "greater_than",
"broadcast": False
},
{
"operation": "less_than",
"broadcast": False
},
{
"operation": "greater_equal",
"broadcast": False
},
{
"operation": "less_equal",
"broadcast": False
},
{"operation": "equal", "broadcast": False},
{"operation": "not_equal", "broadcast": False},
{"operation": "greater_than", "broadcast": False},
{"operation": "less_than", "broadcast": False},
{"operation": "greater_equal", "broadcast": False},
{"operation": "less_equal", "broadcast": False},
]
......@@ -319,35 +273,15 @@ class TestComparisonOpBroadcastTest(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float32"
},
{"dtype": "float32"},
]
self.attrs = [
{
"operation": "equal",
"broadcast": True
},
{
"operation": "not_equal",
"broadcast": True
},
{
"operation": "greater_than",
"broadcast": True
},
{
"operation": "less_than",
"broadcast": True
},
{
"operation": "greater_equal",
"broadcast": True
},
{
"operation": "less_equal",
"broadcast": True
},
{"operation": "equal", "broadcast": True},
{"operation": "not_equal", "broadcast": True},
{"operation": "greater_than", "broadcast": True},
{"operation": "less_than", "broadcast": True},
{"operation": "greater_equal", "broadcast": True},
{"operation": "less_equal", "broadcast": True},
]
......
......@@ -21,8 +21,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestConcatOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -47,7 +48,8 @@ class TestConcatOp(OpTest):
def cinn_inputs(self, builder, inputs):
return [
builder.create_input(
self.nptype2cinntype(data.dtype), data.shape, name)
self.nptype2cinntype(data.dtype), data.shape, name
)
for name, data in inputs.items()
]
......@@ -67,8 +69,7 @@ class TestConcatOp(OpTest):
input_datas = [data for _, data in self.inputs.items()]
res = self.get_cinn_output(prog, target, input_list, input_datas,
[out])
res = self.get_cinn_output(prog, target, input_list, input_datas, [out])
self.cinn_outputs = res
......@@ -119,14 +120,10 @@ class TestConcatOpShape(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float32"
},
{"dtype": "float32"},
]
self.attrs = [
{
"axis": 0
},
{"axis": 0},
]
......@@ -149,35 +146,17 @@ class TestConcatOpDtype(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float16"
},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
{
"dtype": "bool"
},
{
"dtype": "uint8"
},
{
"dtype": "int8"
},
{
"dtype": "int32"
},
{
"dtype": "int64"
},
{"dtype": "float16"},
{"dtype": "float32"},
{"dtype": "float64"},
{"dtype": "bool"},
{"dtype": "uint8"},
{"dtype": "int8"},
{"dtype": "int32"},
{"dtype": "int64"},
]
self.attrs = [
{
"axis": 0
},
{"axis": 0},
]
......@@ -187,80 +166,39 @@ class TestConcatOpMultipleInputs(TestCaseHelper):
self.cls = TestConcatOp
self.inputs = [
# 1D tensor with 1~4 inputs
{
"shapes": [[10]],
"axis": 0
},
{
"shapes": [[10], [6]],
"axis": 0
},
{
"shapes": [[10], [6], [8]],
"axis": 0
},
{
"shapes": [[10], [6], [10], [6]],
"axis": 0
},
{"shapes": [[10]], "axis": 0},
{"shapes": [[10], [6]], "axis": 0},
{"shapes": [[10], [6], [8]], "axis": 0},
{"shapes": [[10], [6], [10], [6]], "axis": 0},
# 2D tensor with 1~4 inputs
{
"shapes": [[8, 5]],
"axis": 1
},
{
"shapes": [[8, 5], [8, 8]],
"axis": 1
},
{
"shapes": [[8, 5], [8, 5], [16, 5]],
"axis": 0
},
{
"shapes": [[8, 5], [8, 5], [8, 5], [8, 5]],
"axis": 0
},
{"shapes": [[8, 5]], "axis": 1},
{"shapes": [[8, 5], [8, 8]], "axis": 1},
{"shapes": [[8, 5], [8, 5], [16, 5]], "axis": 0},
{"shapes": [[8, 5], [8, 5], [8, 5], [8, 5]], "axis": 0},
# 3D tensor with 1~4 inputs
{
"shapes": [[10, 3, 5]],
"axis": 0
},
{
"shapes": [[10, 3, 5], [10, 7, 5]],
"axis": 1
},
{
"shapes": [[10, 3, 5], [10, 3, 6], [10, 3, 7]],
"axis": 2
},
{
"shapes": [[10, 3, 5], [4, 3, 5], [2, 3, 5]],
"axis": 0
},
{"shapes": [[10, 3, 5]], "axis": 0},
{"shapes": [[10, 3, 5], [10, 7, 5]], "axis": 1},
{"shapes": [[10, 3, 5], [10, 3, 6], [10, 3, 7]], "axis": 2},
{"shapes": [[10, 3, 5], [4, 3, 5], [2, 3, 5]], "axis": 0},
# 4D tensor with 1~4 inputs
{
"shapes": [[80, 1, 5, 7]],
"axis": 0
},
{
"shapes": [[80, 1, 5, 7], [80, 79, 5, 7]],
"axis": 1
},
{"shapes": [[80, 1, 5, 7]], "axis": 0},
{"shapes": [[80, 1, 5, 7], [80, 79, 5, 7]], "axis": 1},
{
"shapes": [[80, 1, 50, 7], [80, 1, 5, 7], [80, 1, 10, 7]],
"axis": 2
"axis": 2,
},
{
"shapes": [[80, 1, 5, 17], [80, 1, 5, 27], [80, 1, 5, 37],
[80, 1, 5, 47]],
"axis":
3
"shapes": [
[80, 1, 5, 17],
[80, 1, 5, 27],
[80, 1, 5, 37],
[80, 1, 5, 47],
],
"axis": 3,
},
]
self.dtypes = [
{
"dtype": "float32"
},
{"dtype": "float32"},
]
self.attrs = []
......@@ -271,86 +209,30 @@ class TestConcatOpAttrs(TestCaseHelper):
self.cls = TestConcatOp
self.inputs = [
# 1D tensor
{
"shapes": [[10], [8]],
"axis": 0
},
{
"shapes": [[10], [6]],
"axis": -1
},
{"shapes": [[10], [8]], "axis": 0},
{"shapes": [[10], [6]], "axis": -1},
# 2D tensor
{
"shapes": [[8, 5], [10, 5]],
"axis": 0
},
{
"shapes": [[8, 5], [8, 8]],
"axis": 1
},
{"shapes": [[8, 5], [10, 5]], "axis": 0},
{"shapes": [[8, 5], [8, 8]], "axis": 1},
# 3D tensor
{
"shapes": [[10, 3, 5], [10, 3, 5]],
"axis": 0
},
{
"shapes": [[10, 3, 5], [10, 7, 5]],
"axis": 1
},
{
"shapes": [[10, 3, 15], [10, 3, 5]],
"axis": 2
},
{
"shapes": [[10, 3, 7], [10, 3, 5]],
"axis": -1
},
{
"shapes": [[10, 3, 5], [10, 7, 5]],
"axis": -2
},
{
"shapes": [[10, 7, 5], [20, 7, 5]],
"axis": -3
},
{"shapes": [[10, 3, 5], [10, 3, 5]], "axis": 0},
{"shapes": [[10, 3, 5], [10, 7, 5]], "axis": 1},
{"shapes": [[10, 3, 15], [10, 3, 5]], "axis": 2},
{"shapes": [[10, 3, 7], [10, 3, 5]], "axis": -1},
{"shapes": [[10, 3, 5], [10, 7, 5]], "axis": -2},
{"shapes": [[10, 7, 5], [20, 7, 5]], "axis": -3},
# 4D tensor
{
"shapes": [[80, 1, 5, 7], [80, 1, 5, 7]],
"axis": 0
},
{
"shapes": [[80, 1, 5, 7], [80, 79, 5, 7]],
"axis": 1
},
{
"shapes": [[80, 1, 5, 7], [80, 1, 10, 7]],
"axis": 2
},
{
"shapes": [[80, 1, 5, 7], [80, 1, 5, 7]],
"axis": 3
},
{
"shapes": [[80, 1, 5, 7], [80, 1, 5, 13]],
"axis": -1
},
{
"shapes": [[80, 1, 5, 7], [80, 1, 5, 7]],
"axis": -2
},
{
"shapes": [[80, 15, 5, 7], [80, 5, 5, 7]],
"axis": -3
},
{
"shapes": [[80, 1, 5, 7], [20, 1, 5, 7]],
"axis": -4
},
{"shapes": [[80, 1, 5, 7], [80, 1, 5, 7]], "axis": 0},
{"shapes": [[80, 1, 5, 7], [80, 79, 5, 7]], "axis": 1},
{"shapes": [[80, 1, 5, 7], [80, 1, 10, 7]], "axis": 2},
{"shapes": [[80, 1, 5, 7], [80, 1, 5, 7]], "axis": 3},
{"shapes": [[80, 1, 5, 7], [80, 1, 5, 13]], "axis": -1},
{"shapes": [[80, 1, 5, 7], [80, 1, 5, 7]], "axis": -2},
{"shapes": [[80, 15, 5, 7], [80, 5, 5, 7]], "axis": -3},
{"shapes": [[80, 1, 5, 7], [20, 1, 5, 7]], "axis": -4},
]
self.dtypes = [
{
"dtype": "float32"
},
{"dtype": "float32"},
]
self.attrs = []
......
......@@ -21,8 +21,9 @@ from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestConstantOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -104,9 +105,7 @@ class TestConstantOpShape(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float32"
},
{"dtype": "float32"},
]
self.attrs = []
......@@ -130,30 +129,14 @@ class TestConstantOpDtype(TestCaseHelper):
},
]
self.dtypes = [
{
"dtype": "float16"
},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
{
"dtype": "bool"
},
{
"dtype": "uint8"
},
{
"dtype": "int8"
},
{
"dtype": "int32"
},
{
"dtype": "int64"
},
{"dtype": "float16"},
{"dtype": "float32"},
{"dtype": "float64"},
{"dtype": "bool"},
{"dtype": "uint8"},
{"dtype": "int8"},
{"dtype": "int32"},
{"dtype": "int64"},
]
self.attrs = []
......
This diff has been collapsed.
......@@ -23,8 +23,9 @@ from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
@OpTestTool.skip_if(
not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
)
class TestCosOp(OpTest):
def setUp(self):
print(f"\nRunning {self.__class__.__name__}: {self.case}")
......@@ -32,7 +33,8 @@ class TestCosOp(OpTest):
def prepare_inputs(self):
self.x_np = self.random(
shape=self.case["x_shape"], dtype=self.case["x_dtype"])
shape=self.case["x_shape"], dtype=self.case["x_dtype"]
)
def build_paddle_program(self, target):
x = paddle.to_tensor(self.x_np, stop_gradient=True)
......@@ -42,8 +44,10 @@ class TestCosOp(OpTest):
def build_cinn_program(self, target):
builder = NetBuilder("unary_elementwise_test")
x = builder.create_input(
self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
"x")
self.nptype2cinntype(self.case["x_dtype"]),
self.case["x_shape"],
"x",
)
out = builder.cos(x)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.x_np], [out])
......@@ -58,24 +62,34 @@ class TestCosOpShape(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestCosOpShape"
self.cls = TestCosOp
self.inputs = [{
"x_shape": [1],
}, {
"x_shape": [1024],
}, {
"x_shape": [1, 2048],
}, {
"x_shape": [1, 1, 1],
}, {
"x_shape": [32, 64],
}, {
"x_shape": [16, 8, 4, 2],
}, {
"x_shape": [16, 8, 4, 2, 1],
}]
self.dtypes = [{
"x_dtype": "float32",
}]
self.inputs = [
{
"x_shape": [1],
},
{
"x_shape": [1024],
},
{
"x_shape": [1, 2048],
},
{
"x_shape": [1, 1, 1],
},
{
"x_shape": [32, 64],
},
{
"x_shape": [16, 8, 4, 2],
},
{
"x_shape": [16, 8, 4, 2, 1],
},
]
self.dtypes = [
{
"x_dtype": "float32",
}
]
self.attrs = []
......@@ -83,14 +97,13 @@ class TestCosOpDtype(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestCosOpDtype"
self.cls = TestCosOp
self.inputs = [{
"x_shape": [32, 64],
}]
self.dtypes = [
self.inputs = [
{
"x_dtype": "float16",
"max_relative_error": 1e-3
},
"x_shape": [32, 64],
}
]
self.dtypes = [
{"x_dtype": "float16", "max_relative_error": 1e-3},
{
"x_dtype": "float32",
},
......
This diff has been collapsed.