Unverified · Commit 402e277f authored by gouzil, committed by GitHub

[CodeStyle][CINN] fix Ruff lint errors (pyupgrade rules) (#54988)

Parent 1899505d
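This commit applies Ruff's pyupgrade (UP) autofixes across the CINN Python sources: `str.format()` calls become f-strings (UP032), printf-style `%` formatting becomes `str.format` (UP031), `class C(object)` drops the redundant base (UP004), `super(C, self)` becomes bare `super()` (UP008), unpacked list comprehensions become generator expressions (UP027), and extraneous parentheses are removed (UP034). A minimal sketch of the most common rewrites below, with illustrative values:

```python
# Illustrative before/after pairs for the main pyupgrade rules in this commit.

llvm_config = "llvm-config"  # hypothetical value, for demonstration only

# UP032: str.format() rewritten as an f-string.
old_cmd = "{} --version".format(llvm_config)
new_cmd = f"{llvm_config} --version"
assert old_cmd == new_cmd

# UP004: inheriting from `object` is redundant in Python 3.
class CostModelOld(object):  # flagged by UP004
    pass

class CostModelNew:  # equivalent, preferred form
    pass

# UP008: super() no longer needs explicit class/instance arguments.
class XgbLike(CostModelNew):
    def __init__(self):
        super().__init__()  # instead of super(XgbLike, self).__init__()
```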
@@ -35,7 +35,7 @@ def main():
     srcs.append(')ROC"')
     srcs.append(');\n')
-    cmd = "{} --version".format(llvm_config)
+    cmd = f"{llvm_config} --version"
     version = (
         subprocess.check_output(cmd, shell=True)
         .decode('utf-8')
......
@@ -101,7 +101,6 @@ ignore = [
 "python/cinn/**" = [
     "F401",
     "F403",
-    "UP004",
 ]
 "test/cinn/**" = [
     "F401",
@@ -112,23 +111,12 @@ ignore = [
     "F901",
     "C408",
     "C417",
-    "UP004",
-    "UP008",
-    "UP027",
-    "UP032",
-    "UP034",
     "PLR0402",
     "PLC0414",
     "PLE1205",
 ]
-"paddle/cinn/**" = [
-    "UP032",
-]
 "tools/cinn/**" = [
     "F401",
     "C416",
-    "UP004",
-    "UP031",
-    "UP032",
     "PLR0402",
 ]
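Removing these `UP...` codes from the per-path ignore tables means Ruff now enforces the corresponding pyupgrade checks in those directories (the remaining `F`, `C`, and `PL` codes stay suppressed). Roughly what each re-enabled rule flags, as a sketch:

```python
# Minimal examples of what the re-enabled pyupgrade rules catch.

value = 3.14159

"%.4f" % value            # UP031: printf-style % formatting
"{:.4f}".format(value)    # UP032: .format() call replaceable by an f-string
f"{value:.4f}"            # the form both rules steer toward

m, n = [int(c) for c in "12"]   # UP027: unpacked list comprehension
m, n = (int(c) for c in "12")   # a generator expression suffices for unpacking

print((value))            # UP034: extraneous parentheses around the argument
print(value)
```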
@@ -21,7 +21,7 @@ class CostModelType(enum.Enum):
     XGB = 1


-class CostModel(object):
+class CostModel:
     """
     A base class to call different cost model algorithm.
     """
......
@@ -16,7 +16,7 @@ import numpy as np
 import xgboost as xgb


-class XgbCostModel(object):
+class XgbCostModel:
     """
     A cost model implemented by XgbCostModel
     """
......
@@ -42,7 +42,7 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")

     img = static.data(name='img', shape=input_shape[1:], dtype='float32')
     if is_depthwise:
......
@@ -22,7 +22,7 @@ logger = logging.getLogger(name="pass_test")

 class FusionTest(PassTest):
     def __init__(self, *args, **kwargs):
-        super(FusionTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)

     def init_input_data(self):
         """Set feed data"""
@@ -44,9 +44,7 @@ class FusionTest(PassTest):
         fusion_passes = ["OpFusionPass", "FusionMergePass"]
         real_group_size = self.get_pass_size(base_passes + fusion_passes)
-        logger.debug(
-            "The model has been fused into {} groups".format(real_group_size)
-        )
+        logger.debug(f"The model has been fused into {real_group_size} groups")
         self.assertEqual(
             real_group_size,
             group_size,
......
@@ -38,7 +38,7 @@ paddle.enable_static()

 class OpMapperTest(OpTest):
     def __init__(self, *args, **kwargs):
-        super(OpMapperTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self._init_place()
         self.init_input_data()
......
@@ -58,7 +58,7 @@ def convert_uint16_to_float(data):

 class OpTest(unittest.TestCase):
     def __init__(self, *args, **kwargs):
-        super(OpTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self._init_target()
         self._init_results()
         self._init_seed()
@@ -304,7 +304,7 @@ class OpTest(unittest.TestCase):
         error_message = "[Check " + name + "] " + error_message
-        logger.debug("{} {}".format(is_allclose, error_message))
+        logger.debug(f"{is_allclose} {error_message}")
         self.assertTrue(is_allclose, msg=error_message)

     @staticmethod
......
@@ -70,7 +70,7 @@ class TestBitcastConvertCase1(TestBitcastConvertOp):
         packed = pack(data.size * 'h', *data.flatten())
         self.inputs = {"x": data}
         self.outputs = {
-            "y": np.array(unpack('4i', packed), dtype='int32').reshape((4)),
+            "y": np.array(unpack('4i', packed), dtype='int32').reshape(4),
             "output_type": "int32",
         }
......
@@ -55,7 +55,7 @@ class TestGatherNdOp(OpTest):
         x = paddle.to_tensor(x, stop_gradient=False)
         index = paddle.to_tensor(index, stop_gradient=False)
         out = paddle.gather_nd(x, index)
-        logger.debug(" -- The output of Paddle:\n{}".format(out))
+        logger.debug(f" -- The output of Paddle:\n{out}")
         self.paddle_outputs.append(out)

     def build_cinn_program(self, target):
@@ -70,7 +70,7 @@ class TestGatherNdOp(OpTest):
             res = self.get_cinn_output(
                 prog, target, [x, index], self.data[i], [out]
             )
-            logger.debug(" -- The output of CINN:\n{}".format(res))
+            logger.debug(f" -- The output of CINN:\n{res}")
             self.cinn_outputs.extend(res)

     def test_check_results(self):
......
@@ -54,7 +54,7 @@ class TestGatherOp(OpTest):
         x = paddle.to_tensor(x, stop_gradient=False)
         index = paddle.to_tensor(index, stop_gradient=False)
         out = paddle.gather(x, index, axis)
-        logger.debug(" -- The output of Paddle:\n{}".format(out))
+        logger.debug(f" -- The output of Paddle:\n{out}")
         self.paddle_outputs.append(out)

     def build_cinn_program(self, target):
@@ -67,7 +67,7 @@ class TestGatherOp(OpTest):
         out = builder.gather(x, index, axis=axis)
         prog = builder.build()
         res = self.get_cinn_output(prog, target, [x, index], self.data, [out])
-        logger.debug(" -- The output of CINN:\n{}".format(res))
+        logger.debug(f" -- The output of CINN:\n{res}")
         self.cinn_outputs.extend(res)

     def test_check_results(self):
......
@@ -79,7 +79,7 @@ class TestScatterAddOp(OpTest):
                             [i, j, k, self.inputs["index"][l]]
                         )
             else:
-                self.assertTrue(False, "Axis {} No Implement".format(pos_axis))
+                self.assertTrue(False, f"Axis {pos_axis} No Implement")

             index = paddle.to_tensor(index_nd, stop_gradient=True)
             res = paddle.scatter_nd_add(x, index, y)
......
@@ -74,7 +74,7 @@ class TestScatterAssignOpBase(OpTest):
                         l
                     ]
             else:
-                self.assertTrue(False, "Axis {} No Implement".format(self.axis))
+                self.assertTrue(False, f"Axis {self.axis} No Implement")

         pd_out = paddle.to_tensor(out, stop_gradient=True)
         self.paddle_outputs = [pd_out]
......
@@ -195,7 +195,7 @@ def create_unit_test(
         def cinn_func(self, builder, *args):
             return eval(fn_cinn)(*args)

-    cls_name = "{}_{}".format(parent.__name__, test_name)
+    cls_name = f"{parent.__name__}_{test_name}"
     TestClass.__name__ = cls_name
     globals()[cls_name] = TestClass
......
@@ -29,7 +29,7 @@ logger = logging.getLogger(name="pass_test")

 class PassTest(OpTest):
     def __init__(self, *args, **kwargs):
-        super(PassTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.init_input_data()

     def init_input_data(self) -> dict:
@@ -82,9 +82,9 @@ class PassTest(OpTest):
     def get_pass_size(self, passes):
         pass_prog, _, outputs = self.run_program()
         fetch_ids = {str(out) for out in outputs}
-        logger.debug("Before pass {}:\n{}".format(passes, str(pass_prog)))
+        logger.debug(f"Before pass {passes}:\n{str(pass_prog)}")
         op_num = pass_prog.apply_pass(fetch_ids, self.target, passes)
-        logger.debug("After pass {}:\n{}".format(passes, str(pass_prog)))
+        logger.debug(f"After pass {passes}:\n{str(pass_prog)}")
         return op_num

     def check_pass_outputs(
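One detail worth noting: the converted f-strings above keep the explicit `str(pass_prog)` call. Inside an f-string that call is redundant, since `{...}` formats its value via `str()` by default; the UP032 fix only changes the formatting mechanism, not the expressions. A quick illustration:

```python
class Prog:
    def __str__(self):
        return "program"

p = Prog()
# f-strings apply str() implicitly, so both forms render the same text.
assert f"Before pass: {p}" == f"Before pass: {str(p)}"
```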
@@ -97,9 +97,7 @@ class PassTest(OpTest):
         equal_nan=False,
     ):
         base_pass_size = self.get_pass_size(base_passes)
-        logger.debug(
-            "Pass after base pass optimize has {} ops".format(base_pass_size)
-        )
+        logger.debug(f"Pass after base pass optimize has {base_pass_size} ops")
         test_pass_size = self.get_pass_size(base_passes + test_passes)
         logger.debug(
             "Pass after base and test pass optimize has {} ops".format(
......
@@ -41,7 +41,7 @@ def pool2d(np_data, attrs, dtype="float32"):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")

     if data_format == "NCHW":
         in_n, in_c, in_h, in_w = in_shape = np_data.shape
@@ -52,7 +52,7 @@ def pool2d(np_data, attrs, dtype="float32"):
         height_axis = 1
         width_axis = 2
     else:
-        raise ValueError("data_format {} is not supported".format(data_format))
+        raise ValueError(f"data_format {data_format} is not supported")

     if isinstance(kernel_size, int):
         k_h = k_w = kernel_size
@@ -205,7 +205,7 @@ def pool2d(np_data, attrs, dtype="float32"):
                 axis=(height_axis, width_axis),
             )
     else:
-        raise ValueError("pool type {} is not supported".format(pool_type))
+        raise ValueError(f"pool type {pool_type} is not supported")

     ret_np = np.maximum(ret_np, fill_value)
     return ret_np, [out_shape]
@@ -232,7 +232,7 @@ def pool3d(np_data, attrs, dtype="float32"):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")

     if data_format == "NCDHW":
         in_n, in_c, in_d, in_h, in_w = in_shape = np_data.shape
@@ -245,7 +245,7 @@ def pool3d(np_data, attrs, dtype="float32"):
         height_axis = 2
         width_axis = 3
     else:
-        raise ValueError("data_format {} is not supported".format(data_format))
+        raise ValueError(f"data_format {data_format} is not supported")

     if isinstance(kernel_size, int):
         k_d = k_h = k_w = kernel_size
@@ -416,7 +416,7 @@ def pool3d(np_data, attrs, dtype="float32"):
                 axis=(depth_axis, height_axis, width_axis),
             )
     else:
-        raise ValueError("pool type {} is not supported".format(pool_type))
+        raise ValueError(f"pool type {pool_type} is not supported")

     ret_np = np.maximum(ret_np, fill_value)
     return ret_np, [out_shape]
@@ -443,7 +443,7 @@ def pool1d(np_data, attrs, dtype="float32"):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")

     if data_format == "NCW":
         in_n, in_c, in_w = in_shape = np_data.shape
@@ -452,7 +452,7 @@ def pool1d(np_data, attrs, dtype="float32"):
         in_n, in_w, in_c = in_shape = np_data.shape
         width_axis = 1
     else:
-        raise ValueError("data_format {} is not supported".format(data_format))
+        raise ValueError(f"data_format {data_format} is not supported")

     if isinstance(kernel_size, int):
         k_w = kernel_size
@@ -539,7 +539,7 @@ def pool1d(np_data, attrs, dtype="float32"):
                 pad_np[:, k * s_w : k * s_w + k_w, :], axis=width_axis
             )
     else:
-        raise ValueError("pool type {} is not supported".format(pool_type))
+        raise ValueError(f"pool type {pool_type} is not supported")

     ret_np = np.maximum(ret_np, fill_value)
     return ret_np, [out_shape]
@@ -63,7 +63,7 @@ class TestMamul(unittest.TestCase):

 def create_matmul_basic(target, m, n, k):
-    m, n, k = [ir.Expr(_) for _ in (m, n, k)]
+    m, n, k = (ir.Expr(_) for _ in (m, n, k))

     a = lang.Placeholder("float32", "A", [m, k])
     b = lang.Placeholder("float32", "B", [k, n])
@@ -90,7 +90,7 @@ def create_matmul_basic(target, m, n, k):

 def create_matmul_tile(target, m, n, k):
-    m, n, k = [ir.Expr(_) for _ in [m, n, k]]
+    m, n, k = (ir.Expr(_) for _ in [m, n, k])
     a = lang.Placeholder("float32", "A", [m, k])
     b = lang.Placeholder("float32", "B", [k, n])
......
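The rewrites above are UP027 (unpacked list comprehension): building a throwaway list just to unpack it is replaced by a generator expression, which tuple assignment consumes eagerly, so behavior is preserved for a fixed-length target. A standalone sketch with a plain stand-in for `ir.Expr`:

```python
def expr(v):
    # stand-in for ir.Expr, which wraps a value in a CINN IR expression
    return ("Expr", v)

# Before (UP027): a temporary list is allocated only to be unpacked.
m, n, k = [expr(_) for _ in (512, 256, 128)]
# After: the generator is unpacked directly, with no intermediate list.
m, n, k = (expr(_) for _ in (512, 256, 128))
assert m == ("Expr", 512) and k == ("Expr", 128)
```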
@@ -43,7 +43,7 @@ class TestPackedFunc(unittest.TestCase):
         self.assertEqual(mul(4, 5), 20)

     def test_callable_object(self):
-        class Accumulator(object):
+        class Accumulator:
             def __init__(self, init):
                 self.init = init
......
@@ -165,9 +165,9 @@ class TestPaddleModel(OpMapperTest):
             return_numpy=True,
         )

-        logger.debug(msg="Program:\n{}".format(self.inference_program))
-        logger.debug(msg="Param List: {}".format(self.param_vars.keys()))
-        logger.debug(msg="Feed List: {}".format(self.feed_names))
+        logger.debug(msg=f"Program:\n{self.inference_program}")
+        logger.debug(msg=f"Param List: {self.param_vars.keys()}")
+        logger.debug(msg=f"Feed List: {self.feed_names}")
         logger.debug(
             msg="Fetch List: {}".format(
                 [var.name for var in self.fetch_targets]
@@ -195,7 +195,7 @@ class TestPaddleModel(OpMapperTest):
             fetch_list=self.fetch_targets,
             return_numpy=True,
         )
-        logger.debug("Paddle Result:\n{}".format(self.paddle_outputs))
+        logger.debug(f"Paddle Result:\n{self.paddle_outputs}")

     def build_cinn_program(self, target):
         self.assertEqual(
@@ -237,9 +237,7 @@ class TestPaddleModel(OpMapperTest):
         # get cinn input list
         inputs = prog.get_inputs()
-        logger.debug(
-            "CINN Input List: {}".format([var.name() for var in inputs])
-        )
+        logger.debug(f"CINN Input List: {[var.name() for var in inputs]}")
         self.assertEqual(
             len(feed_with_param),
             len(inputs),
@@ -284,7 +282,7 @@ class TestPaddleModel(OpMapperTest):
             prog, target, cinn_inputs, cinn_feed_datas, cinn_output, passes=[]
         )

-        logger.debug("CINN Result:\n{}".format(self.cinn_outputs))
+        logger.debug(f"CINN Result:\n{self.cinn_outputs}")

     def test_check_results(self):
         # TODO(6clc): There is a random accuracy problem,
......
@@ -113,13 +113,13 @@ class TestPEElementwise(unittest.TestCase):
         is_round=False,
         is_bool=False,
     ):
-        m, n = [
+        m, n = (
             ir.Expr(_)
             for _ in (
                 self.m,
                 self.n,
             )
-        ]
+        )
         x = lang.Placeholder(dtype, "x", [m, n])
         y = cinn_fn(x.to_tensor())
......
@@ -100,13 +100,13 @@ class TestPEReduction(unittest.TestCase):
         self.reduction_tester(fn_name, pe_fn, np_fn, [1], False)

     def reduction_tester(self, fn_name, cinn_fn, np_fn, axes, keep_dims):
-        m, n = [
+        m, n = (
             ir.Expr(_)
             for _ in (
                 self.m,
                 self.n,
             )
-        ]
+        )
         x = lang.Placeholder("float32", "x", [m, n])
         func_name = "test_" + fn_name
         y = cinn_fn(x.to_tensor(), axes, keep_dims)
......
@@ -53,14 +53,14 @@ class TestPETransform(unittest.TestCase):
     def transform_matmul_tester(
         self, fn_name, cinn_fn, np_fn, trans_a, trans_b, alpha
     ):
-        m, n, k = [
+        m, n, k = (
             ir.Expr(_)
             for _ in (
                 self.m,
                 self.n,
                 self.k,
             )
-        ]
+        )
         x_shape_expr = [k, m] if trans_a else [m, k]
         y_shape_expr = [n, k] if trans_b else [k, n]
         x = lang.Placeholder("float32", "x", x_shape_expr)
......
@@ -28,7 +28,7 @@ import sys
 from typing import List


-class Markdown(object):
+class Markdown:
     '''
     A simple markdown generator.
     '''
@@ -92,7 +92,7 @@ class Mark:
     roc = "@ROC"


-class ContentGenerator(object):
+class ContentGenerator:
     '''
     Interface for some content passed into the parser.
     '''
@@ -104,7 +104,7 @@ class ContentGenerator(object):
         pass


-class Parser(object):
+class Parser:
     DOC_COMMENT_PREFIX = "//!"

     def __init__(self):
......
@@ -49,7 +49,7 @@ def main():
         predictor.zero_copy_run()
     time2 = time.time()
     total_inference_cost = (time2 - time1) * 1000  # total time cost(ms)
-    print("Average latency : {} ms".format(total_inference_cost / repeat))
+    print(f"Average latency : {total_inference_cost / repeat} ms")
     output_names = predictor.get_output_names()
     output_tensor = predictor.get_output_tensor(output_names[0])
     output_data = output_tensor.copy_to_cpu()
......
@@ -240,14 +240,16 @@ def tune_and_evaluate(func):
         np.array(evaluator_preheat().results) * 1000
     )  # convert to millisecond
     print(
-        "[PreHeat]Mean inference time (std dev): %.4f ms (%.4f ms)"
-        % (np.mean(prof_res1), np.std(prof_res1))
+        "[PreHeat]Mean inference time (std dev): {:.4f} ms ({:.4f} ms)".format(
+            np.mean(prof_res1), np.std(prof_res1)
+        )
     )
     prof_res2 = np.array(evaluator().results) * 1000  # convert to millisecond
     print(
-        "[Benchmark]Mean inference time (std dev): %.4f ms (%.4f ms)"
-        % (np.mean(prof_res2), np.std(prof_res2))
+        "[Benchmark]Mean inference time (std dev): {:.4f} ms ({:.4f} ms)".format(
+            np.mean(prof_res2), np.std(prof_res2)
+        )
     )
......
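Unlike the other call sites in this commit, these printf-style strings (UP031) are rewritten to `str.format` with explicit `{:.4f}` specifiers rather than f-strings, presumably because the arguments are full expressions; the formatted output is unchanged. A quick check with made-up latencies:

```python
import numpy as np

prof_res = np.array([1.234567, 2.345678])  # illustrative latencies in ms
old = "%.4f ms (%.4f ms)" % (np.mean(prof_res), np.std(prof_res))
new = "{:.4f} ms ({:.4f} ms)".format(np.mean(prof_res), np.std(prof_res))
assert old == new  # both render the same mean/std text
```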