Unverified commit 402e277f, authored by gouzil, committed by GitHub

[CodeStyle][CINN] fix Ruff lint errors (pyupgrade rules) (#54988)

Parent 1899505d
@@ -35,7 +35,7 @@ def main():
     srcs.append(')ROC"')
     srcs.append(');\n')
-    cmd = "{} --version".format(llvm_config)
+    cmd = f"{llvm_config} --version"
     version = (
         subprocess.check_output(cmd, shell=True)
         .decode('utf-8')
......
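The change above is Ruff's UP032 rule: a `str.format` call with inline arguments becomes an f-string. The two forms produce identical output, as this quick sketch shows (the `llvm_config` value is illustrative, not taken from the script):

    llvm_config = "llvm-config"  # illustrative value
    assert "{} --version".format(llvm_config) == f"{llvm_config} --version" == "llvm-config --version"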
@@ -101,7 +101,6 @@ ignore = [
 "python/cinn/**" = [
     "F401",
     "F403",
-    "UP004",
 ]
 "test/cinn/**" = [
     "F401",
@@ -112,23 +111,12 @@ ignore = [
     "F901",
     "C408",
     "C417",
-    "UP004",
-    "UP008",
-    "UP027",
-    "UP032",
-    "UP034",
     "PLR0402",
     "PLC0414",
     "PLE1205",
 ]
-"paddle/cinn/**" = [
-    "UP032",
-]
 "tools/cinn/**" = [
     "F401",
     "C416",
-    "UP004",
-    "UP031",
-    "UP032",
     "PLR0402",
 ]
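The codes dropped from the ignore lists above are Ruff's pyupgrade rules, which this PR now fixes in the code instead of suppressing. A rough, hedged summary of what each flags, with illustrative before/after pairs (names and values are made up for the sketch):

    class Old(object):                   # UP004: useless `object` inheritance
        def __init__(self):
            super(Old, self).__init__()  # UP008: super() needs no arguments here

    class New:                           # the post-fix spelling of both patterns
        def __init__(self):
            super().__init__()

    cost = 1.5
    assert "%s ms" % cost == "{} ms".format(cost) == f"{cost} ms"  # UP031 -> UP032
    x, y = [int(v) for v in "12"]        # UP027: prefer a generator, (int(v) for v in "12")
    assert (x, y) == (1, 2)
    print(("ok"))                        # UP034: the inner parentheses are redundant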
@@ -21,7 +21,7 @@ class CostModelType(enum.Enum):
     XGB = 1
-class CostModel(object):
+class CostModel:
     """
     A base class to call different cost model algorithm.
     """
......
@@ -16,7 +16,7 @@ import numpy as np
 import xgboost as xgb
-class XgbCostModel(object):
+class XgbCostModel:
     """
     A cost model implemented by XgbCostModel
     """
......
@@ -42,7 +42,7 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")
     img = static.data(name='img', shape=input_shape[1:], dtype='float32')
     if is_depthwise:
......
@@ -22,7 +22,7 @@ logger = logging.getLogger(name="pass_test")
 class FusionTest(PassTest):
     def __init__(self, *args, **kwargs):
-        super(FusionTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
     def init_input_data(self):
         """Set feed data"""
@@ -44,9 +44,7 @@ class FusionTest(PassTest):
         fusion_passes = ["OpFusionPass", "FusionMergePass"]
         real_group_size = self.get_pass_size(base_passes + fusion_passes)
-        logger.debug(
-            "The model has been fused into {} groups".format(real_group_size)
-        )
+        logger.debug(f"The model has been fused into {real_group_size} groups")
         self.assertEqual(
             real_group_size,
             group_size,
......
@@ -38,7 +38,7 @@ paddle.enable_static()
 class OpMapperTest(OpTest):
     def __init__(self, *args, **kwargs):
-        super(OpMapperTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self._init_place()
         self.init_input_data()
......
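The `super(Class, self)` rewrites running through these test bases rely on Python 3's zero-argument `super()`, which resolves the class through the implicit `__class__` cell, so inside a method the two spellings are equivalent. A minimal, self-contained sketch (class names are illustrative):

    class Base:
        def __init__(self, *args, **kwargs):
            self.args = args
            self.kwargs = kwargs

    class Child(Base):
        def __init__(self, *args, **kwargs):
            # Equivalent to super(Child, self).__init__(*args, **kwargs)
            super().__init__(*args, **kwargs)

    c = Child(1, flag=True)
    assert c.args == (1,) and c.kwargs == {"flag": True}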
@@ -58,7 +58,7 @@ def convert_uint16_to_float(data):
 class OpTest(unittest.TestCase):
     def __init__(self, *args, **kwargs):
-        super(OpTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self._init_target()
         self._init_results()
         self._init_seed()
@@ -304,7 +304,7 @@ class OpTest(unittest.TestCase):
         error_message = "[Check " + name + "] " + error_message
-        logger.debug("{} {}".format(is_allclose, error_message))
+        logger.debug(f"{is_allclose} {error_message}")
         self.assertTrue(is_allclose, msg=error_message)
     @staticmethod
......
@@ -70,7 +70,7 @@ class TestBitcastConvertCase1(TestBitcastConvertOp):
         packed = pack(data.size * 'h', *data.flatten())
         self.inputs = {"x": data}
         self.outputs = {
-            "y": np.array(unpack('4i', packed), dtype='int32').reshape((4)),
+            "y": np.array(unpack('4i', packed), dtype='int32').reshape(4),
             "output_type": "int32",
         }
......
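The `reshape((4))` fix is UP034 (extraneous parentheses): `(4)` is just the integer 4, not a 1-tuple, so the extra parentheses changed nothing and the call is behavior-preserving. A quick check:

    import numpy as np

    a = np.arange(4, dtype='int32')
    # (4) is the int 4, not the tuple (4,); both calls are identical.
    assert a.reshape((4)).shape == a.reshape(4).shape == (4,)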
@@ -55,7 +55,7 @@ class TestGatherNdOp(OpTest):
         x = paddle.to_tensor(x, stop_gradient=False)
         index = paddle.to_tensor(index, stop_gradient=False)
         out = paddle.gather_nd(x, index)
-        logger.debug(" -- The output of Paddle:\n{}".format(out))
+        logger.debug(f" -- The output of Paddle:\n{out}")
         self.paddle_outputs.append(out)
     def build_cinn_program(self, target):
@@ -70,7 +70,7 @@ class TestGatherNdOp(OpTest):
         res = self.get_cinn_output(
             prog, target, [x, index], self.data[i], [out]
         )
-        logger.debug(" -- The output of CINN:\n{}".format(res))
+        logger.debug(f" -- The output of CINN:\n{res}")
         self.cinn_outputs.extend(res)
     def test_check_results(self):
......
@@ -54,7 +54,7 @@ class TestGatherOp(OpTest):
         x = paddle.to_tensor(x, stop_gradient=False)
         index = paddle.to_tensor(index, stop_gradient=False)
         out = paddle.gather(x, index, axis)
-        logger.debug(" -- The output of Paddle:\n{}".format(out))
+        logger.debug(f" -- The output of Paddle:\n{out}")
         self.paddle_outputs.append(out)
     def build_cinn_program(self, target):
@@ -67,7 +67,7 @@ class TestGatherOp(OpTest):
         out = builder.gather(x, index, axis=axis)
         prog = builder.build()
         res = self.get_cinn_output(prog, target, [x, index], self.data, [out])
-        logger.debug(" -- The output of CINN:\n{}".format(res))
+        logger.debug(f" -- The output of CINN:\n{res}")
         self.cinn_outputs.extend(res)
     def test_check_results(self):
......
@@ -79,7 +79,7 @@ class TestScatterAddOp(OpTest):
                             [i, j, k, self.inputs["index"][l]]
                         )
         else:
-            self.assertTrue(False, "Axis {} No Implement".format(pos_axis))
+            self.assertTrue(False, f"Axis {pos_axis} No Implement")
         index = paddle.to_tensor(index_nd, stop_gradient=True)
         res = paddle.scatter_nd_add(x, index, y)
......
@@ -74,7 +74,7 @@ class TestScatterAssignOpBase(OpTest):
                 l
             ]
         else:
-            self.assertTrue(False, "Axis {} No Implement".format(self.axis))
+            self.assertTrue(False, f"Axis {self.axis} No Implement")
         pd_out = paddle.to_tensor(out, stop_gradient=True)
         self.paddle_outputs = [pd_out]
......
@@ -195,7 +195,7 @@ def create_unit_test(
         def cinn_func(self, builder, *args):
             return eval(fn_cinn)(*args)
-    cls_name = "{}_{}".format(parent.__name__, test_name)
+    cls_name = f"{parent.__name__}_{test_name}"
     TestClass.__name__ = cls_name
     globals()[cls_name] = TestClass
......
@@ -29,7 +29,7 @@ logger = logging.getLogger(name="pass_test")
 class PassTest(OpTest):
     def __init__(self, *args, **kwargs):
-        super(PassTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.init_input_data()
     def init_input_data(self) -> dict:
@@ -82,9 +82,9 @@ class PassTest(OpTest):
     def get_pass_size(self, passes):
         pass_prog, _, outputs = self.run_program()
         fetch_ids = {str(out) for out in outputs}
-        logger.debug("Before pass {}:\n{}".format(passes, str(pass_prog)))
+        logger.debug(f"Before pass {passes}:\n{str(pass_prog)}")
         op_num = pass_prog.apply_pass(fetch_ids, self.target, passes)
-        logger.debug("After pass {}:\n{}".format(passes, str(pass_prog)))
+        logger.debug(f"After pass {passes}:\n{str(pass_prog)}")
         return op_num
     def check_pass_outputs(
@@ -97,9 +97,7 @@ class PassTest(OpTest):
         equal_nan=False,
     ):
         base_pass_size = self.get_pass_size(base_passes)
-        logger.debug(
-            "Pass after base pass optimize has {} ops".format(base_pass_size)
-        )
+        logger.debug(f"Pass after base pass optimize has {base_pass_size} ops")
         test_pass_size = self.get_pass_size(base_passes + test_passes)
         logger.debug(
             "Pass after base and test pass optimize has {} ops".format(
......
@@ -41,7 +41,7 @@ def pool2d(np_data, attrs, dtype="float32"):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")
     if data_format == "NCHW":
         in_n, in_c, in_h, in_w = in_shape = np_data.shape
@@ -52,7 +52,7 @@ def pool2d(np_data, attrs, dtype="float32"):
         height_axis = 1
         width_axis = 2
     else:
-        raise ValueError("data_format {} is not supported".format(data_format))
+        raise ValueError(f"data_format {data_format} is not supported")
     if isinstance(kernel_size, int):
         k_h = k_w = kernel_size
@@ -205,7 +205,7 @@ def pool2d(np_data, attrs, dtype="float32"):
             axis=(height_axis, width_axis),
         )
     else:
-        raise ValueError("pool type {} is not supported".format(pool_type))
+        raise ValueError(f"pool type {pool_type} is not supported")
     ret_np = np.maximum(ret_np, fill_value)
     return ret_np, [out_shape]
@@ -232,7 +232,7 @@ def pool3d(np_data, attrs, dtype="float32"):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")
     if data_format == "NCDHW":
         in_n, in_c, in_d, in_h, in_w = in_shape = np_data.shape
@@ -245,7 +245,7 @@ def pool3d(np_data, attrs, dtype="float32"):
         height_axis = 2
         width_axis = 3
     else:
-        raise ValueError("data_format {} is not supported".format(data_format))
+        raise ValueError(f"data_format {data_format} is not supported")
     if isinstance(kernel_size, int):
         k_d = k_h = k_w = kernel_size
@@ -416,7 +416,7 @@ def pool3d(np_data, attrs, dtype="float32"):
             axis=(depth_axis, height_axis, width_axis),
         )
     else:
-        raise ValueError("pool type {} is not supported".format(pool_type))
+        raise ValueError(f"pool type {pool_type} is not supported")
     ret_np = np.maximum(ret_np, fill_value)
     return ret_np, [out_shape]
@@ -443,7 +443,7 @@ def pool1d(np_data, attrs, dtype="float32"):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")
     if data_format == "NCW":
         in_n, in_c, in_w = in_shape = np_data.shape
@@ -452,7 +452,7 @@ def pool1d(np_data, attrs, dtype="float32"):
         in_n, in_w, in_c = in_shape = np_data.shape
         width_axis = 1
     else:
-        raise ValueError("data_format {} is not supported".format(data_format))
+        raise ValueError(f"data_format {data_format} is not supported")
     if isinstance(kernel_size, int):
         k_w = kernel_size
@@ -539,7 +539,7 @@ def pool1d(np_data, attrs, dtype="float32"):
                 pad_np[:, k * s_w : k * s_w + k_w, :], axis=width_axis
             )
     else:
-        raise ValueError("pool type {} is not supported".format(pool_type))
+        raise ValueError(f"pool type {pool_type} is not supported")
     ret_np = np.maximum(ret_np, fill_value)
     return ret_np, [out_shape]
@@ -63,7 +63,7 @@ class TestMamul(unittest.TestCase):
 def create_matmul_basic(target, m, n, k):
-    m, n, k = [ir.Expr(_) for _ in (m, n, k)]
+    m, n, k = (ir.Expr(_) for _ in (m, n, k))
     a = lang.Placeholder("float32", "A", [m, k])
     b = lang.Placeholder("float32", "B", [k, n])
@@ -90,7 +90,7 @@ def create_matmul_basic(target, m, n, k):
 def create_matmul_tile(target, m, n, k):
-    m, n, k = [ir.Expr(_) for _ in [m, n, k]]
+    m, n, k = (ir.Expr(_) for _ in [m, n, k])
     a = lang.Placeholder("float32", "A", [m, k])
     b = lang.Placeholder("float32", "B", [k, n])
......
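Swapping the list comprehension for a generator expression here is UP027: when unpacking into a fixed number of targets, a generator yields exactly the same result as the intermediate list, just without materializing it. A small sketch, with a stand-in for ir.Expr (which needs a CINN build to import):

    def expr(v):          # hypothetical stand-in for ir.Expr
        return ('Expr', v)

    m, n, k = (expr(_) for _ in (2, 3, 4))   # same result as [expr(_) for _ in (2, 3, 4)]
    assert (m, n, k) == (('Expr', 2), ('Expr', 3), ('Expr', 4))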
@@ -43,7 +43,7 @@ class TestPackedFunc(unittest.TestCase):
         self.assertEqual(mul(4, 5), 20)
     def test_callable_object(self):
-        class Accumulator(object):
+        class Accumulator:
             def __init__(self, init):
                 self.init = init
......
@@ -165,9 +165,9 @@ class TestPaddleModel(OpMapperTest):
             return_numpy=True,
         )
-        logger.debug(msg="Program:\n{}".format(self.inference_program))
-        logger.debug(msg="Param List: {}".format(self.param_vars.keys()))
-        logger.debug(msg="Feed List: {}".format(self.feed_names))
+        logger.debug(msg=f"Program:\n{self.inference_program}")
+        logger.debug(msg=f"Param List: {self.param_vars.keys()}")
+        logger.debug(msg=f"Feed List: {self.feed_names}")
         logger.debug(
             msg="Fetch List: {}".format(
                 [var.name for var in self.fetch_targets]
@@ -195,7 +195,7 @@ class TestPaddleModel(OpMapperTest):
             fetch_list=self.fetch_targets,
             return_numpy=True,
         )
-        logger.debug("Paddle Result:\n{}".format(self.paddle_outputs))
+        logger.debug(f"Paddle Result:\n{self.paddle_outputs}")
     def build_cinn_program(self, target):
         self.assertEqual(
@@ -237,9 +237,7 @@ class TestPaddleModel(OpMapperTest):
         # get cinn input list
         inputs = prog.get_inputs()
-        logger.debug(
-            "CINN Input List: {}".format([var.name() for var in inputs])
-        )
+        logger.debug(f"CINN Input List: {[var.name() for var in inputs]}")
         self.assertEqual(
             len(feed_with_param),
             len(inputs),
@@ -284,7 +282,7 @@ class TestPaddleModel(OpMapperTest):
             prog, target, cinn_inputs, cinn_feed_datas, cinn_output, passes=[]
         )
-        logger.debug("CINN Result:\n{}".format(self.cinn_outputs))
+        logger.debug(f"CINN Result:\n{self.cinn_outputs}")
     def test_check_results(self):
         # TODO(6clc): There is a random accuracy problem,
......
@@ -113,13 +113,13 @@ class TestPEElementwise(unittest.TestCase):
         is_round=False,
         is_bool=False,
     ):
-        m, n = [
+        m, n = (
             ir.Expr(_)
             for _ in (
                 self.m,
                 self.n,
             )
-        ]
+        )
         x = lang.Placeholder(dtype, "x", [m, n])
         y = cinn_fn(x.to_tensor())
......
@@ -100,13 +100,13 @@ class TestPEReduction(unittest.TestCase):
         self.reduction_tester(fn_name, pe_fn, np_fn, [1], False)
     def reduction_tester(self, fn_name, cinn_fn, np_fn, axes, keep_dims):
-        m, n = [
+        m, n = (
             ir.Expr(_)
             for _ in (
                 self.m,
                 self.n,
             )
-        ]
+        )
         x = lang.Placeholder("float32", "x", [m, n])
         func_name = "test_" + fn_name
         y = cinn_fn(x.to_tensor(), axes, keep_dims)
......
@@ -53,14 +53,14 @@ class TestPETransform(unittest.TestCase):
     def transform_matmul_tester(
         self, fn_name, cinn_fn, np_fn, trans_a, trans_b, alpha
     ):
-        m, n, k = [
+        m, n, k = (
             ir.Expr(_)
             for _ in (
                 self.m,
                 self.n,
                 self.k,
             )
-        ]
+        )
         x_shape_expr = [k, m] if trans_a else [m, k]
         y_shape_expr = [n, k] if trans_b else [k, n]
         x = lang.Placeholder("float32", "x", x_shape_expr)
......
@@ -28,7 +28,7 @@ import sys
 from typing import List
-class Markdown(object):
+class Markdown:
     '''
     A simple markdown generator.
     '''
@@ -92,7 +92,7 @@ class Mark:
     roc = "@ROC"
-class ContentGenerator(object):
+class ContentGenerator:
     '''
     Interface for some content passed into the parser.
     '''
@@ -104,7 +104,7 @@ class ContentGenerator(object):
         pass
-class Parser(object):
+class Parser:
     DOC_COMMENT_PREFIX = "//!"
     def __init__(self):
......
@@ -49,7 +49,7 @@ def main():
     predictor.zero_copy_run()
     time2 = time.time()
     total_inference_cost = (time2 - time1) * 1000  # total time cost(ms)
-    print("Average latency : {} ms".format(total_inference_cost / repeat))
+    print(f"Average latency : {total_inference_cost / repeat} ms")
     output_names = predictor.get_output_names()
     output_tensor = predictor.get_output_tensor(output_names[0])
     output_data = output_tensor.copy_to_cpu()
......
@@ -240,14 +240,16 @@ def tune_and_evaluate(func):
         np.array(evaluator_preheat().results) * 1000
     )  # convert to millisecond
     print(
-        "[PreHeat]Mean inference time (std dev): %.4f ms (%.4f ms)"
-        % (np.mean(prof_res1), np.std(prof_res1))
+        "[PreHeat]Mean inference time (std dev): {:.4f} ms ({:.4f} ms)".format(
+            np.mean(prof_res1), np.std(prof_res1)
+        )
     )
     prof_res2 = np.array(evaluator().results) * 1000  # convert to millisecond
     print(
-        "[Benchmark]Mean inference time (std dev): %.4f ms (%.4f ms)"
-        % (np.mean(prof_res2), np.std(prof_res2))
+        "[Benchmark]Mean inference time (std dev): {:.4f} ms ({:.4f} ms)".format(
+            np.mean(prof_res2), np.std(prof_res2)
+        )
     )
......
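This last change is UP031: printf-style `%` formatting becomes `str.format`, with `%.4f` mapping to `{:.4f}`. (An f-string would also satisfy the rule; `.format` is presumably kept here to keep the long expressions readable.) The two forms agree, as a quick check shows:

    import numpy as np

    vals = np.array([1.23456, 2.34567])
    old = "mean %.4f ms (%.4f ms)" % (np.mean(vals), np.std(vals))
    new = "mean {:.4f} ms ({:.4f} ms)".format(np.mean(vals), np.std(vals))
    assert old == new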