diff --git a/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py b/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py
index cb57d85c32c01fac4180f3b6e073746976d26ca7..257ef3a7215f0c8a8865c380dfe75ce3c14f7fbb 100644
--- a/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py
+++ b/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py
@@ -35,7 +35,7 @@ def main():
     srcs.append(')ROC"')
     srcs.append(');\n')
 
-    cmd = "{} --version".format(llvm_config)
+    cmd = f"{llvm_config} --version"
     version = (
         subprocess.check_output(cmd, shell=True)
         .decode('utf-8')
diff --git a/pyproject.toml b/pyproject.toml
index 254014988c9984b845df9fcce451f957d0c02977..3721ffed72717c74fb1db0db3b64a942830718d2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -101,7 +101,6 @@ ignore = [
 "python/cinn/**" = [
     "F401",
     "F403",
-    "UP004",
 ]
 "test/cinn/**" = [
     "F401",
@@ -112,23 +111,12 @@ ignore = [
     "F901",
     "C408",
     "C417",
-    "UP004",
-    "UP008",
-    "UP027",
-    "UP032",
-    "UP034",
     "PLR0402",
     "PLC0414",
     "PLE1205",
 ]
-"paddle/cinn/**" = [
-    "UP032",
-]
 "tools/cinn/**" = [
     "F401",
     "C416",
-    "UP004",
-    "UP031",
-    "UP032",
     "PLR0402",
 ]
diff --git a/python/cinn/auto_schedule/cost_model/cost_model.py b/python/cinn/auto_schedule/cost_model/cost_model.py
index 1a1307b723aebb93a5f8170cb44550eb2a9417db..7b0d8647f6c0d3fcc8b6f1a16310b23b3d9dc8fd 100644
--- a/python/cinn/auto_schedule/cost_model/cost_model.py
+++ b/python/cinn/auto_schedule/cost_model/cost_model.py
@@ -21,7 +21,7 @@ class CostModelType(enum.Enum):
     XGB = 1
 
 
-class CostModel(object):
+class CostModel:
     """
     A base class to call different cost model algorithm.
     """
diff --git a/python/cinn/auto_schedule/cost_model/xgb_cost_model.py b/python/cinn/auto_schedule/cost_model/xgb_cost_model.py
index db6a123b3d5fbb830ca18003b80e959824f0b1a9..6dc3c8e3baba5b85a6d9de2fb2a6eba8f311a506 100644
--- a/python/cinn/auto_schedule/cost_model/xgb_cost_model.py
+++ b/python/cinn/auto_schedule/cost_model/xgb_cost_model.py
@@ -16,7 +16,7 @@ import numpy as np
 import xgboost as xgb
 
 
-class XgbCostModel(object):
+class XgbCostModel:
     """
     A cost model implemented by XgbCostModel
     """
diff --git a/test/cinn/conv2d_utils.py b/test/cinn/conv2d_utils.py
index 18a6cc02d0ec8eb08d5130771de1cecf62160643..9a31e290fe502ad213294adc642309a77f0ab3c1 100644
--- a/test/cinn/conv2d_utils.py
+++ b/test/cinn/conv2d_utils.py
@@ -42,7 +42,7 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")
 
     img = static.data(name='img', shape=input_shape[1:], dtype='float32')
     if is_depthwise:
diff --git a/test/cinn/fusion/fusion_test.py b/test/cinn/fusion/fusion_test.py
index 94f2e68df608e5cabbc7924bd8cc8ac3c0ea1329..db11a78c982c4e08b9631120d76433e8a09444b2 100644
--- a/test/cinn/fusion/fusion_test.py
+++ b/test/cinn/fusion/fusion_test.py
@@ -22,7 +22,7 @@ logger = logging.getLogger(name="pass_test")
 
 class FusionTest(PassTest):
     def __init__(self, *args, **kwargs):
-        super(FusionTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
 
     def init_input_data(self):
         """Set feed data"""
@@ -44,9 +44,7 @@ class FusionTest(PassTest):
 
         fusion_passes = ["OpFusionPass", "FusionMergePass"]
         real_group_size = self.get_pass_size(base_passes + fusion_passes)
-        logger.debug(
-            "The model has been fused into {} groups".format(real_group_size)
-        )
+        logger.debug(f"The model has been fused into {real_group_size} groups")
         self.assertEqual(
             real_group_size,
             group_size,
diff --git a/test/cinn/op_mappers/op_mapper_test.py b/test/cinn/op_mappers/op_mapper_test.py
index 1b4076cd6e8b7c05435625b000ca7e846dbada7f..881dc740a3578b23f2d6b9059abbf920fe80ac49 100644
--- a/test/cinn/op_mappers/op_mapper_test.py
+++ b/test/cinn/op_mappers/op_mapper_test.py
@@ -38,7 +38,7 @@ paddle.enable_static()
 
 class OpMapperTest(OpTest):
     def __init__(self, *args, **kwargs):
-        super(OpMapperTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self._init_place()
         self.init_input_data()
 
diff --git a/test/cinn/ops/op_test.py b/test/cinn/ops/op_test.py
index 398a3e79721199f66be80e4f4370365bffbc5428..d0952caef29ca19823ef4783846c7116f93d42ab 100755
--- a/test/cinn/ops/op_test.py
+++ b/test/cinn/ops/op_test.py
@@ -58,7 +58,7 @@ def convert_uint16_to_float(data):
 
 class OpTest(unittest.TestCase):
     def __init__(self, *args, **kwargs):
-        super(OpTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self._init_target()
         self._init_results()
         self._init_seed()
@@ -304,7 +304,7 @@ class OpTest(unittest.TestCase):
 
         error_message = "[Check " + name + "] " + error_message
 
-        logger.debug("{} {}".format(is_allclose, error_message))
+        logger.debug(f"{is_allclose} {error_message}")
         self.assertTrue(is_allclose, msg=error_message)
 
     @staticmethod
diff --git a/test/cinn/ops/test_bitcast_convert_op.py b/test/cinn/ops/test_bitcast_convert_op.py
index 2da4aed7cc77f121987748291abd53ace93b8348..30af48b8a24ebcb38e6d699caa55c586927f336c 100644
--- a/test/cinn/ops/test_bitcast_convert_op.py
+++ b/test/cinn/ops/test_bitcast_convert_op.py
@@ -70,7 +70,7 @@ class TestBitcastConvertCase1(TestBitcastConvertOp):
         packed = pack(data.size * 'h', *data.flatten())
         self.inputs = {"x": data}
         self.outputs = {
-            "y": np.array(unpack('4i', packed), dtype='int32').reshape((4)),
+            "y": np.array(unpack('4i', packed), dtype='int32').reshape(4),
             "output_type": "int32",
         }
 
diff --git a/test/cinn/ops/test_gather_nd_op.py b/test/cinn/ops/test_gather_nd_op.py
index 16cd4a18d5a230478b1981bdb39dd43e15e4d5bd..477a03f2807499e986a04ca070d43d77e07af071 100644
--- a/test/cinn/ops/test_gather_nd_op.py
+++ b/test/cinn/ops/test_gather_nd_op.py
@@ -55,7 +55,7 @@ class TestGatherNdOp(OpTest):
             x = paddle.to_tensor(x, stop_gradient=False)
             index = paddle.to_tensor(index, stop_gradient=False)
             out = paddle.gather_nd(x, index)
-            logger.debug(" -- The output of Paddle:\n{}".format(out))
+            logger.debug(f" -- The output of Paddle:\n{out}")
             self.paddle_outputs.append(out)
 
     def build_cinn_program(self, target):
@@ -70,7 +70,7 @@ class TestGatherNdOp(OpTest):
             res = self.get_cinn_output(
                 prog, target, [x, index], self.data[i], [out]
             )
-            logger.debug(" -- The output of CINN:\n{}".format(res))
+            logger.debug(f" -- The output of CINN:\n{res}")
             self.cinn_outputs.extend(res)
 
     def test_check_results(self):
diff --git a/test/cinn/ops/test_gather_op.py b/test/cinn/ops/test_gather_op.py
index 9f567cf0f3ca2c456d17fb4f4580163d4f1f296d..c0bb83f20ede9ca13ba6be073cc128cf483c6ba4 100644
--- a/test/cinn/ops/test_gather_op.py
+++ b/test/cinn/ops/test_gather_op.py
@@ -54,7 +54,7 @@ class TestGatherOp(OpTest):
         x = paddle.to_tensor(x, stop_gradient=False)
         index = paddle.to_tensor(index, stop_gradient=False)
         out = paddle.gather(x, index, axis)
-        logger.debug(" -- The output of Paddle:\n{}".format(out))
+        logger.debug(f" -- The output of Paddle:\n{out}")
         self.paddle_outputs.append(out)
 
     def build_cinn_program(self, target):
@@ -67,7 +67,7 @@ class TestGatherOp(OpTest):
         out = builder.gather(x, index, axis=axis)
         prog = builder.build()
         res = self.get_cinn_output(prog, target, [x, index], self.data, [out])
-        logger.debug(" -- The output of CINN:\n{}".format(res))
+        logger.debug(f" -- The output of CINN:\n{res}")
         self.cinn_outputs.extend(res)
 
     def test_check_results(self):
diff --git a/test/cinn/ops/test_scatter_add.py b/test/cinn/ops/test_scatter_add.py
index db1fdc935e29af180a14054af676ff63fe5e9a80..487b3f7cbb1b123730e18cf07db204d729f61aa7 100644
--- a/test/cinn/ops/test_scatter_add.py
+++ b/test/cinn/ops/test_scatter_add.py
@@ -79,7 +79,7 @@ class TestScatterAddOp(OpTest):
                                 [i, j, k, self.inputs["index"][l]]
                             )
         else:
-            self.assertTrue(False, "Axis {} No Implement".format(pos_axis))
+            self.assertTrue(False, f"Axis {pos_axis} No Implement")
 
         index = paddle.to_tensor(index_nd, stop_gradient=True)
         res = paddle.scatter_nd_add(x, index, y)
diff --git a/test/cinn/ops/test_scatter_assign_op.py b/test/cinn/ops/test_scatter_assign_op.py
index 253e6fc1a8a498ddaa1c0711f7d5258716d7b3f9..628306a1dffb668eb092304efb96ac2d1062cf58 100644
--- a/test/cinn/ops/test_scatter_assign_op.py
+++ b/test/cinn/ops/test_scatter_assign_op.py
@@ -74,7 +74,7 @@ class TestScatterAssignOpBase(OpTest):
                             l
                         ]
         else:
-            self.assertTrue(False, "Axis {} No Implement".format(self.axis))
+            self.assertTrue(False, f"Axis {self.axis} No Implement")
 
         pd_out = paddle.to_tensor(out, stop_gradient=True)
         self.paddle_outputs = [pd_out]
diff --git a/test/cinn/ops/test_zero_dim_tensor.py b/test/cinn/ops/test_zero_dim_tensor.py
index 966f17febd6feaa8a8324ab18849d55f47f26cd9..c6a01e3bdd78b8ee88a2eea48c0883c4b2867433 100644
--- a/test/cinn/ops/test_zero_dim_tensor.py
+++ b/test/cinn/ops/test_zero_dim_tensor.py
@@ -195,7 +195,7 @@ def create_unit_test(
         def cinn_func(self, builder, *args):
            return eval(fn_cinn)(*args)
 
-    cls_name = "{}_{}".format(parent.__name__, test_name)
+    cls_name = f"{parent.__name__}_{test_name}"
     TestClass.__name__ = cls_name
     globals()[cls_name] = TestClass
 
diff --git a/test/cinn/passes/pass_test.py b/test/cinn/passes/pass_test.py
index 8bcd4fbec2928eac938de174930668d13db0ff1e..71a30abb15bae8626b827e259bfc43e9ccdf2baf 100644
--- a/test/cinn/passes/pass_test.py
+++ b/test/cinn/passes/pass_test.py
@@ -29,7 +29,7 @@ logger = logging.getLogger(name="pass_test")
 
 class PassTest(OpTest):
     def __init__(self, *args, **kwargs):
-        super(PassTest, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.init_input_data()
 
     def init_input_data(self) -> dict:
@@ -82,9 +82,9 @@ class PassTest(OpTest):
     def get_pass_size(self, passes):
         pass_prog, _, outputs = self.run_program()
         fetch_ids = {str(out) for out in outputs}
-        logger.debug("Before pass {}:\n{}".format(passes, str(pass_prog)))
+        logger.debug(f"Before pass {passes}:\n{str(pass_prog)}")
         op_num = pass_prog.apply_pass(fetch_ids, self.target, passes)
-        logger.debug("After pass {}:\n{}".format(passes, str(pass_prog)))
+        logger.debug(f"After pass {passes}:\n{str(pass_prog)}")
         return op_num
 
     def check_pass_outputs(
@@ -97,9 +97,7 @@ class PassTest(OpTest):
         equal_nan=False,
     ):
         base_pass_size = self.get_pass_size(base_passes)
-        logger.debug(
-            "Pass after base pass optimize has {} ops".format(base_pass_size)
-        )
+        logger.debug(f"Pass after base pass optimize has {base_pass_size} ops")
         test_pass_size = self.get_pass_size(base_passes + test_passes)
         logger.debug(
             "Pass after base and test pass optimize has {} ops".format(
diff --git a/test/cinn/pool_utils.py b/test/cinn/pool_utils.py
index 76c559ecf0a2de2005da534b315bf58d4befa835..b4a465be548f0cb180bfe98abeae04d7e9ba685a 100644
--- a/test/cinn/pool_utils.py
+++ b/test/cinn/pool_utils.py
@@ -41,7 +41,7 @@ def pool2d(np_data, attrs, dtype="float32"):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")
 
     if data_format == "NCHW":
         in_n, in_c, in_h, in_w = in_shape = np_data.shape
@@ -52,7 +52,7 @@ def pool2d(np_data, attrs, dtype="float32"):
         height_axis = 1
         width_axis = 2
     else:
-        raise ValueError("data_format {} is not supported".format(data_format))
+        raise ValueError(f"data_format {data_format} is not supported")
 
     if isinstance(kernel_size, int):
         k_h = k_w = kernel_size
@@ -205,7 +205,7 @@ def pool2d(np_data, attrs, dtype="float32"):
             axis=(height_axis, width_axis),
         )
     else:
-        raise ValueError("pool type {} is not supported".format(pool_type))
+        raise ValueError(f"pool type {pool_type} is not supported")
 
     ret_np = np.maximum(ret_np, fill_value)
     return ret_np, [out_shape]
@@ -232,7 +232,7 @@ def pool3d(np_data, attrs, dtype="float32"):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")
 
     if data_format == "NCDHW":
         in_n, in_c, in_d, in_h, in_w = in_shape = np_data.shape
@@ -245,7 +245,7 @@ def pool3d(np_data, attrs, dtype="float32"):
         height_axis = 2
         width_axis = 3
     else:
-        raise ValueError("data_format {} is not supported".format(data_format))
+        raise ValueError(f"data_format {data_format} is not supported")
 
     if isinstance(kernel_size, int):
         k_d = k_h = k_w = kernel_size
@@ -416,7 +416,7 @@ def pool3d(np_data, attrs, dtype="float32"):
             axis=(depth_axis, height_axis, width_axis),
         )
     else:
-        raise ValueError("pool type {} is not supported".format(pool_type))
+        raise ValueError(f"pool type {pool_type} is not supported")
 
     ret_np = np.maximum(ret_np, fill_value)
     return ret_np, [out_shape]
@@ -443,7 +443,7 @@ def pool1d(np_data, attrs, dtype="float32"):
         elif key == "data_format":
             data_format = attrs.get_attr("data_format")
         else:
-            raise ValueError("attr_store {} is not supported".format(key))
+            raise ValueError(f"attr_store {key} is not supported")
 
     if data_format == "NCW":
         in_n, in_c, in_w = in_shape = np_data.shape
@@ -452,7 +452,7 @@ def pool1d(np_data, attrs, dtype="float32"):
         in_n, in_w, in_c = in_shape = np_data.shape
         width_axis = 1
     else:
-        raise ValueError("data_format {} is not supported".format(data_format))
+        raise ValueError(f"data_format {data_format} is not supported")
 
     if isinstance(kernel_size, int):
         k_w = kernel_size
@@ -539,7 +539,7 @@ def pool1d(np_data, attrs, dtype="float32"):
             pad_np[:, k * s_w : k * s_w + k_w, :], axis=width_axis
         )
     else:
-        raise ValueError("pool type {} is not supported".format(pool_type))
+        raise ValueError(f"pool type {pool_type} is not supported")
 
     ret_np = np.maximum(ret_np, fill_value)
     return ret_np, [out_shape]
diff --git a/test/cinn/test_matmul.py b/test/cinn/test_matmul.py
index 8deac2bb701afe6aa615e824907d2f4462f1e89e..50266941d877400c3f8ca1c094fa3ab74a1e16f3 100755
--- a/test/cinn/test_matmul.py
+++ b/test/cinn/test_matmul.py
@@ -63,7 +63,7 @@ class TestMamul(unittest.TestCase):
 
 
 def create_matmul_basic(target, m, n, k):
-    m, n, k = [ir.Expr(_) for _ in (m, n, k)]
+    m, n, k = (ir.Expr(_) for _ in (m, n, k))
 
     a = lang.Placeholder("float32", "A", [m, k])
     b = lang.Placeholder("float32", "B", [k, n])
@@ -90,7 +90,7 @@ def create_matmul_basic(target, m, n, k):
 
 
 def create_matmul_tile(target, m, n, k):
-    m, n, k = [ir.Expr(_) for _ in [m, n, k]]
+    m, n, k = (ir.Expr(_) for _ in [m, n, k])
 
     a = lang.Placeholder("float32", "A", [m, k])
     b = lang.Placeholder("float32", "B", [k, n])
diff --git a/test/cinn/test_packed_func.py b/test/cinn/test_packed_func.py
index 917097a533a752c60ee576b5cf7074eb9ee37e9e..4525eab37ec71e349743ce91ff7e5e2a050600db 100755
--- a/test/cinn/test_packed_func.py
+++ b/test/cinn/test_packed_func.py
@@ -43,7 +43,7 @@ class TestPackedFunc(unittest.TestCase):
         self.assertEqual(mul(4, 5), 20)
 
     def test_callable_object(self):
-        class Accumulator(object):
+        class Accumulator:
             def __init__(self, init):
                 self.init = init
 
diff --git a/test/cinn/test_paddle_model_convertor.py b/test/cinn/test_paddle_model_convertor.py
index a408717742bc7143f4598f89cabb959b6076e783..8d8313807a2ff290544f06cb3f044cef53f43a9b 100644
--- a/test/cinn/test_paddle_model_convertor.py
+++ b/test/cinn/test_paddle_model_convertor.py
@@ -165,9 +165,9 @@ class TestPaddleModel(OpMapperTest):
             return_numpy=True,
         )
 
-        logger.debug(msg="Program:\n{}".format(self.inference_program))
-        logger.debug(msg="Param List: {}".format(self.param_vars.keys()))
-        logger.debug(msg="Feed List: {}".format(self.feed_names))
+        logger.debug(msg=f"Program:\n{self.inference_program}")
+        logger.debug(msg=f"Param List: {self.param_vars.keys()}")
+        logger.debug(msg=f"Feed List: {self.feed_names}")
         logger.debug(
             msg="Fetch List: {}".format(
                 [var.name for var in self.fetch_targets]
@@ -195,7 +195,7 @@ class TestPaddleModel(OpMapperTest):
             fetch_list=self.fetch_targets,
             return_numpy=True,
         )
-        logger.debug("Paddle Result:\n{}".format(self.paddle_outputs))
+        logger.debug(f"Paddle Result:\n{self.paddle_outputs}")
 
     def build_cinn_program(self, target):
         self.assertEqual(
@@ -237,9 +237,7 @@ class TestPaddleModel(OpMapperTest):
 
         # get cinn input list
         inputs = prog.get_inputs()
-        logger.debug(
-            "CINN Input List: {}".format([var.name() for var in inputs])
-        )
+        logger.debug(f"CINN Input List: {[var.name() for var in inputs]}")
         self.assertEqual(
             len(feed_with_param),
             len(inputs),
@@ -284,7 +284,7 @@ class TestPaddleModel(OpMapperTest):
             prog, target, cinn_inputs, cinn_feed_datas, cinn_output, passes=[]
         )
 
-        logger.debug("CINN Result:\n{}".format(self.cinn_outputs))
+        logger.debug(f"CINN Result:\n{self.cinn_outputs}")
 
     def test_check_results(self):
         # TODO(6clc): There is a random accuracy problem,
diff --git a/test/cinn/test_pe_elementwise.py b/test/cinn/test_pe_elementwise.py
index a8a52fa285a16f154ed5154c5cbb3b127446f2e1..2f17231d31fee1ad13b74be0d4dcc3accf631f17 100644
--- a/test/cinn/test_pe_elementwise.py
+++ b/test/cinn/test_pe_elementwise.py
@@ -113,13 +113,13 @@ class TestPEElementwise(unittest.TestCase):
         is_round=False,
         is_bool=False,
     ):
-        m, n = [
+        m, n = (
             ir.Expr(_)
             for _ in (
                 self.m,
                 self.n,
             )
-        ]
+        )
         x = lang.Placeholder(dtype, "x", [m, n])
         y = cinn_fn(x.to_tensor())
 
diff --git a/test/cinn/test_pe_reduction.py b/test/cinn/test_pe_reduction.py
index 946983496884d45aa54b248648cd7978497ddffc..386f6faa841803c1d4d70b1438d7142e055d47c6 100644
--- a/test/cinn/test_pe_reduction.py
+++ b/test/cinn/test_pe_reduction.py
@@ -100,13 +100,13 @@ class TestPEReduction(unittest.TestCase):
         self.reduction_tester(fn_name, pe_fn, np_fn, [1], False)
 
     def reduction_tester(self, fn_name, cinn_fn, np_fn, axes, keep_dims):
-        m, n = [
+        m, n = (
             ir.Expr(_)
             for _ in (
                 self.m,
                 self.n,
             )
-        ]
+        )
         x = lang.Placeholder("float32", "x", [m, n])
         func_name = "test_" + fn_name
         y = cinn_fn(x.to_tensor(), axes, keep_dims)
diff --git a/test/cinn/test_pe_transform.py b/test/cinn/test_pe_transform.py
index becf4aa02e25baf45bf216fff9bfc51d0393b6d7..8b08068f7e17b8f5ae221195a7215dd080d5ae9c 100644
--- a/test/cinn/test_pe_transform.py
+++ b/test/cinn/test_pe_transform.py
@@ -53,14 +53,14 @@ class TestPETransform(unittest.TestCase):
     def transform_matmul_tester(
         self, fn_name, cinn_fn, np_fn, trans_a, trans_b, alpha
     ):
-        m, n, k = [
+        m, n, k = (
             ir.Expr(_)
             for _ in (
                 self.m,
                 self.n,
                 self.k,
             )
-        ]
+        )
         x_shape_expr = [k, m] if trans_a else [m, k]
         y_shape_expr = [n, k] if trans_b else [k, n]
         x = lang.Placeholder("float32", "x", x_shape_expr)
diff --git a/tools/cinn/gen_c++_tutorial.py b/tools/cinn/gen_c++_tutorial.py
index 16132de0999a01da66fa9f60546646ab77398525..08f66411642812eaa5829bea78896c967c1719da 100644
--- a/tools/cinn/gen_c++_tutorial.py
+++ b/tools/cinn/gen_c++_tutorial.py
@@ -28,7 +28,7 @@ import sys
 from typing import List
 
 
-class Markdown(object):
+class Markdown:
     '''
     A simple markdown generator.
     '''
@@ -92,7 +92,7 @@ class Mark:
     roc = "@ROC"
 
 
-class ContentGenerator(object):
+class ContentGenerator:
     '''
     Interface for some content passed into the parser.
     '''
@@ -104,7 +104,7 @@ class ContentGenerator(object):
         pass
 
 
-class Parser(object):
+class Parser:
     DOC_COMMENT_PREFIX = "//!"
 
     def __init__(self):
diff --git a/tools/cinn/paddle_benchmark/paddle_test_benchmark.py b/tools/cinn/paddle_benchmark/paddle_test_benchmark.py
index 1ce6ce4e9ffa5a98be64608c8299cab31aad40ce..ff7f4d5bd72c8e6ada041b5d2a482e8afdfeebc9 100755
--- a/tools/cinn/paddle_benchmark/paddle_test_benchmark.py
+++ b/tools/cinn/paddle_benchmark/paddle_test_benchmark.py
@@ -49,7 +49,7 @@ def main():
         predictor.zero_copy_run()
     time2 = time.time()
     total_inference_cost = (time2 - time1) * 1000  # total time cost(ms)
-    print("Average latency : {} ms".format(total_inference_cost / repeat))
+    print(f"Average latency : {total_inference_cost / repeat} ms")
     output_names = predictor.get_output_names()
     output_tensor = predictor.get_output_tensor(output_names[0])
     output_data = output_tensor.copy_to_cpu()
diff --git a/tools/cinn/tvm_benchmark/tvm_graph_with_single_op.py b/tools/cinn/tvm_benchmark/tvm_graph_with_single_op.py
index 664970bc1931671edaf2f2a915a765b30b4321f0..79360ed01abd438f0aba057003becbb1effb9189 100755
--- a/tools/cinn/tvm_benchmark/tvm_graph_with_single_op.py
+++ b/tools/cinn/tvm_benchmark/tvm_graph_with_single_op.py
@@ -240,14 +240,16 @@ def tune_and_evaluate(func):
         np.array(evaluator_preheat().results) * 1000
     )  # convert to millisecond
     print(
-        "[PreHeat]Mean inference time (std dev): %.4f ms (%.4f ms)"
-        % (np.mean(prof_res1), np.std(prof_res1))
+        "[PreHeat]Mean inference time (std dev): {:.4f} ms ({:.4f} ms)".format(
+            np.mean(prof_res1), np.std(prof_res1)
+        )
     )
 
     prof_res2 = np.array(evaluator().results) * 1000  # convert to millisecond
     print(
-        "[Benchmark]Mean inference time (std dev): %.4f ms (%.4f ms)"
-        % (np.mean(prof_res2), np.std(prof_res2))
+        "[Benchmark]Mean inference time (std dev): {:.4f} ms ({:.4f} ms)".format(
+            np.mean(prof_res2), np.std(prof_res2)
+        )
     )