From 402e277fb718f0263baca0aecc24fe8964cb0136 Mon Sep 17 00:00:00 2001 From: gouzil <66515297+gouzil@users.noreply.github.com> Date: Mon, 3 Jul 2023 11:21:12 +0800 Subject: [PATCH] [CodeStyle][CINN] fix Ruff lint errors (pyupgrade rules) (#54988) --- .../backends/llvm/generate_runtime_llvm_ir.py | 2 +- pyproject.toml | 12 ------------ .../auto_schedule/cost_model/cost_model.py | 2 +- .../auto_schedule/cost_model/xgb_cost_model.py | 2 +- test/cinn/conv2d_utils.py | 2 +- test/cinn/fusion/fusion_test.py | 6 ++---- test/cinn/op_mappers/op_mapper_test.py | 2 +- test/cinn/ops/op_test.py | 4 ++-- test/cinn/ops/test_bitcast_convert_op.py | 2 +- test/cinn/ops/test_gather_nd_op.py | 4 ++-- test/cinn/ops/test_gather_op.py | 4 ++-- test/cinn/ops/test_scatter_add.py | 2 +- test/cinn/ops/test_scatter_assign_op.py | 2 +- test/cinn/ops/test_zero_dim_tensor.py | 2 +- test/cinn/passes/pass_test.py | 10 ++++------ test/cinn/pool_utils.py | 18 +++++++++--------- test/cinn/test_matmul.py | 4 ++-- test/cinn/test_packed_func.py | 2 +- test/cinn/test_paddle_model_convertor.py | 14 ++++++-------- test/cinn/test_pe_elementwise.py | 4 ++-- test/cinn/test_pe_reduction.py | 4 ++-- test/cinn/test_pe_transform.py | 4 ++-- tools/cinn/gen_c++_tutorial.py | 6 +++--- .../paddle_benchmark/paddle_test_benchmark.py | 2 +- .../tvm_benchmark/tvm_graph_with_single_op.py | 10 ++++++---- 25 files changed, 55 insertions(+), 71 deletions(-) diff --git a/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py b/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py index cb57d85c32c..257ef3a7215 100644 --- a/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py +++ b/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py @@ -35,7 +35,7 @@ def main(): srcs.append(')ROC"') srcs.append(');\n') - cmd = "{} --version".format(llvm_config) + cmd = f"{llvm_config} --version" version = ( subprocess.check_output(cmd, shell=True) .decode('utf-8') diff --git a/pyproject.toml b/pyproject.toml index 254014988c9..3721ffed727 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,6 @@ ignore = [ "python/cinn/**" = [ "F401", "F403", - "UP004", ] "test/cinn/**" = [ "F401", @@ -112,23 +111,12 @@ ignore = [ "F901", "C408", "C417", - "UP004", - "UP008", - "UP027", - "UP032", - "UP034", "PLR0402", "PLC0414", "PLE1205", ] -"paddle/cinn/**" = [ - "UP032", -] "tools/cinn/**" = [ "F401", "C416", - "UP004", - "UP031", - "UP032", "PLR0402", ] diff --git a/python/cinn/auto_schedule/cost_model/cost_model.py b/python/cinn/auto_schedule/cost_model/cost_model.py index 1a1307b723a..7b0d8647f6c 100644 --- a/python/cinn/auto_schedule/cost_model/cost_model.py +++ b/python/cinn/auto_schedule/cost_model/cost_model.py @@ -21,7 +21,7 @@ class CostModelType(enum.Enum): XGB = 1 -class CostModel(object): +class CostModel: """ A base class to call different cost model algorithm. 
""" diff --git a/python/cinn/auto_schedule/cost_model/xgb_cost_model.py b/python/cinn/auto_schedule/cost_model/xgb_cost_model.py index db6a123b3d5..6dc3c8e3bab 100644 --- a/python/cinn/auto_schedule/cost_model/xgb_cost_model.py +++ b/python/cinn/auto_schedule/cost_model/xgb_cost_model.py @@ -16,7 +16,7 @@ import numpy as np import xgboost as xgb -class XgbCostModel(object): +class XgbCostModel: """ A cost model implemented by XgbCostModel """ diff --git a/test/cinn/conv2d_utils.py b/test/cinn/conv2d_utils.py index 18a6cc02d0e..9a31e290fe5 100644 --- a/test/cinn/conv2d_utils.py +++ b/test/cinn/conv2d_utils.py @@ -42,7 +42,7 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise): elif key == "data_format": data_format = attrs.get_attr("data_format") else: - raise ValueError("attr_store {} is not supported".format(key)) + raise ValueError(f"attr_store {key} is not supported") img = static.data(name='img', shape=input_shape[1:], dtype='float32') if is_depthwise: diff --git a/test/cinn/fusion/fusion_test.py b/test/cinn/fusion/fusion_test.py index 94f2e68df60..db11a78c982 100644 --- a/test/cinn/fusion/fusion_test.py +++ b/test/cinn/fusion/fusion_test.py @@ -22,7 +22,7 @@ logger = logging.getLogger(name="pass_test") class FusionTest(PassTest): def __init__(self, *args, **kwargs): - super(FusionTest, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def init_input_data(self): """Set feed data""" @@ -44,9 +44,7 @@ class FusionTest(PassTest): fusion_passes = ["OpFusionPass", "FusionMergePass"] real_group_size = self.get_pass_size(base_passes + fusion_passes) - logger.debug( - "The model has been fused into {} groups".format(real_group_size) - ) + logger.debug(f"The model has been fused into {real_group_size} groups") self.assertEqual( real_group_size, group_size, diff --git a/test/cinn/op_mappers/op_mapper_test.py b/test/cinn/op_mappers/op_mapper_test.py index 1b4076cd6e8..881dc740a35 100644 --- a/test/cinn/op_mappers/op_mapper_test.py +++ b/test/cinn/op_mappers/op_mapper_test.py @@ -38,7 +38,7 @@ paddle.enable_static() class OpMapperTest(OpTest): def __init__(self, *args, **kwargs): - super(OpMapperTest, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self._init_place() self.init_input_data() diff --git a/test/cinn/ops/op_test.py b/test/cinn/ops/op_test.py index 398a3e79721..d0952caef29 100755 --- a/test/cinn/ops/op_test.py +++ b/test/cinn/ops/op_test.py @@ -58,7 +58,7 @@ def convert_uint16_to_float(data): class OpTest(unittest.TestCase): def __init__(self, *args, **kwargs): - super(OpTest, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self._init_target() self._init_results() self._init_seed() @@ -304,7 +304,7 @@ class OpTest(unittest.TestCase): error_message = "[Check " + name + "] " + error_message - logger.debug("{} {}".format(is_allclose, error_message)) + logger.debug(f"{is_allclose} {error_message}") self.assertTrue(is_allclose, msg=error_message) @staticmethod diff --git a/test/cinn/ops/test_bitcast_convert_op.py b/test/cinn/ops/test_bitcast_convert_op.py index 2da4aed7cc7..30af48b8a24 100644 --- a/test/cinn/ops/test_bitcast_convert_op.py +++ b/test/cinn/ops/test_bitcast_convert_op.py @@ -70,7 +70,7 @@ class TestBitcastConvertCase1(TestBitcastConvertOp): packed = pack(data.size * 'h', *data.flatten()) self.inputs = {"x": data} self.outputs = { - "y": np.array(unpack('4i', packed), dtype='int32').reshape((4)), + "y": np.array(unpack('4i', packed), dtype='int32').reshape(4), "output_type": "int32", } diff 
--git a/test/cinn/ops/test_gather_nd_op.py b/test/cinn/ops/test_gather_nd_op.py index 16cd4a18d5a..477a03f2807 100644 --- a/test/cinn/ops/test_gather_nd_op.py +++ b/test/cinn/ops/test_gather_nd_op.py @@ -55,7 +55,7 @@ class TestGatherNdOp(OpTest): x = paddle.to_tensor(x, stop_gradient=False) index = paddle.to_tensor(index, stop_gradient=False) out = paddle.gather_nd(x, index) - logger.debug(" -- The output of Paddle:\n{}".format(out)) + logger.debug(f" -- The output of Paddle:\n{out}") self.paddle_outputs.append(out) def build_cinn_program(self, target): @@ -70,7 +70,7 @@ class TestGatherNdOp(OpTest): res = self.get_cinn_output( prog, target, [x, index], self.data[i], [out] ) - logger.debug(" -- The output of CINN:\n{}".format(res)) + logger.debug(f" -- The output of CINN:\n{res}") self.cinn_outputs.extend(res) def test_check_results(self): diff --git a/test/cinn/ops/test_gather_op.py b/test/cinn/ops/test_gather_op.py index 9f567cf0f3c..c0bb83f20ed 100644 --- a/test/cinn/ops/test_gather_op.py +++ b/test/cinn/ops/test_gather_op.py @@ -54,7 +54,7 @@ class TestGatherOp(OpTest): x = paddle.to_tensor(x, stop_gradient=False) index = paddle.to_tensor(index, stop_gradient=False) out = paddle.gather(x, index, axis) - logger.debug(" -- The output of Paddle:\n{}".format(out)) + logger.debug(f" -- The output of Paddle:\n{out}") self.paddle_outputs.append(out) def build_cinn_program(self, target): @@ -67,7 +67,7 @@ class TestGatherOp(OpTest): out = builder.gather(x, index, axis=axis) prog = builder.build() res = self.get_cinn_output(prog, target, [x, index], self.data, [out]) - logger.debug(" -- The output of CINN:\n{}".format(res)) + logger.debug(f" -- The output of CINN:\n{res}") self.cinn_outputs.extend(res) def test_check_results(self): diff --git a/test/cinn/ops/test_scatter_add.py b/test/cinn/ops/test_scatter_add.py index db1fdc935e2..487b3f7cbb1 100644 --- a/test/cinn/ops/test_scatter_add.py +++ b/test/cinn/ops/test_scatter_add.py @@ -79,7 +79,7 @@ class TestScatterAddOp(OpTest): [i, j, k, self.inputs["index"][l]] ) else: - self.assertTrue(False, "Axis {} No Implement".format(pos_axis)) + self.assertTrue(False, f"Axis {pos_axis} No Implement") index = paddle.to_tensor(index_nd, stop_gradient=True) res = paddle.scatter_nd_add(x, index, y) diff --git a/test/cinn/ops/test_scatter_assign_op.py b/test/cinn/ops/test_scatter_assign_op.py index 253e6fc1a8a..628306a1dff 100644 --- a/test/cinn/ops/test_scatter_assign_op.py +++ b/test/cinn/ops/test_scatter_assign_op.py @@ -74,7 +74,7 @@ class TestScatterAssignOpBase(OpTest): l ] else: - self.assertTrue(False, "Axis {} No Implement".format(self.axis)) + self.assertTrue(False, f"Axis {self.axis} No Implement") pd_out = paddle.to_tensor(out, stop_gradient=True) self.paddle_outputs = [pd_out] diff --git a/test/cinn/ops/test_zero_dim_tensor.py b/test/cinn/ops/test_zero_dim_tensor.py index 966f17febd6..c6a01e3bdd7 100644 --- a/test/cinn/ops/test_zero_dim_tensor.py +++ b/test/cinn/ops/test_zero_dim_tensor.py @@ -195,7 +195,7 @@ def create_unit_test( def cinn_func(self, builder, *args): return eval(fn_cinn)(*args) - cls_name = "{}_{}".format(parent.__name__, test_name) + cls_name = f"{parent.__name__}_{test_name}" TestClass.__name__ = cls_name globals()[cls_name] = TestClass diff --git a/test/cinn/passes/pass_test.py b/test/cinn/passes/pass_test.py index 8bcd4fbec29..71a30abb15b 100644 --- a/test/cinn/passes/pass_test.py +++ b/test/cinn/passes/pass_test.py @@ -29,7 +29,7 @@ logger = logging.getLogger(name="pass_test") class PassTest(OpTest): def __init__(self, 
*args, **kwargs): - super(PassTest, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.init_input_data() def init_input_data(self) -> dict: @@ -82,9 +82,9 @@ class PassTest(OpTest): def get_pass_size(self, passes): pass_prog, _, outputs = self.run_program() fetch_ids = {str(out) for out in outputs} - logger.debug("Before pass {}:\n{}".format(passes, str(pass_prog))) + logger.debug(f"Before pass {passes}:\n{str(pass_prog)}") op_num = pass_prog.apply_pass(fetch_ids, self.target, passes) - logger.debug("After pass {}:\n{}".format(passes, str(pass_prog))) + logger.debug(f"After pass {passes}:\n{str(pass_prog)}") return op_num def check_pass_outputs( @@ -97,9 +97,7 @@ class PassTest(OpTest): equal_nan=False, ): base_pass_size = self.get_pass_size(base_passes) - logger.debug( - "Pass after base pass optimize has {} ops".format(base_pass_size) - ) + logger.debug(f"Pass after base pass optimize has {base_pass_size} ops") test_pass_size = self.get_pass_size(base_passes + test_passes) logger.debug( "Pass after base and test pass optimize has {} ops".format( diff --git a/test/cinn/pool_utils.py b/test/cinn/pool_utils.py index 76c559ecf0a..b4a465be548 100644 --- a/test/cinn/pool_utils.py +++ b/test/cinn/pool_utils.py @@ -41,7 +41,7 @@ def pool2d(np_data, attrs, dtype="float32"): elif key == "data_format": data_format = attrs.get_attr("data_format") else: - raise ValueError("attr_store {} is not supported".format(key)) + raise ValueError(f"attr_store {key} is not supported") if data_format == "NCHW": in_n, in_c, in_h, in_w = in_shape = np_data.shape @@ -52,7 +52,7 @@ def pool2d(np_data, attrs, dtype="float32"): height_axis = 1 width_axis = 2 else: - raise ValueError("data_format {} is not supported".format(data_format)) + raise ValueError(f"data_format {data_format} is not supported") if isinstance(kernel_size, int): k_h = k_w = kernel_size @@ -205,7 +205,7 @@ def pool2d(np_data, attrs, dtype="float32"): axis=(height_axis, width_axis), ) else: - raise ValueError("pool type {} is not supported".format(pool_type)) + raise ValueError(f"pool type {pool_type} is not supported") ret_np = np.maximum(ret_np, fill_value) return ret_np, [out_shape] @@ -232,7 +232,7 @@ def pool3d(np_data, attrs, dtype="float32"): elif key == "data_format": data_format = attrs.get_attr("data_format") else: - raise ValueError("attr_store {} is not supported".format(key)) + raise ValueError(f"attr_store {key} is not supported") if data_format == "NCDHW": in_n, in_c, in_d, in_h, in_w = in_shape = np_data.shape @@ -245,7 +245,7 @@ def pool3d(np_data, attrs, dtype="float32"): height_axis = 2 width_axis = 3 else: - raise ValueError("data_format {} is not supported".format(data_format)) + raise ValueError(f"data_format {data_format} is not supported") if isinstance(kernel_size, int): k_d = k_h = k_w = kernel_size @@ -416,7 +416,7 @@ def pool3d(np_data, attrs, dtype="float32"): axis=(depth_axis, height_axis, width_axis), ) else: - raise ValueError("pool type {} is not supported".format(pool_type)) + raise ValueError(f"pool type {pool_type} is not supported") ret_np = np.maximum(ret_np, fill_value) return ret_np, [out_shape] @@ -443,7 +443,7 @@ def pool1d(np_data, attrs, dtype="float32"): elif key == "data_format": data_format = attrs.get_attr("data_format") else: - raise ValueError("attr_store {} is not supported".format(key)) + raise ValueError(f"attr_store {key} is not supported") if data_format == "NCW": in_n, in_c, in_w = in_shape = np_data.shape @@ -452,7 +452,7 @@ def pool1d(np_data, attrs, dtype="float32"): 
in_n, in_w, in_c = in_shape = np_data.shape width_axis = 1 else: - raise ValueError("data_format {} is not supported".format(data_format)) + raise ValueError(f"data_format {data_format} is not supported") if isinstance(kernel_size, int): k_w = kernel_size @@ -539,7 +539,7 @@ def pool1d(np_data, attrs, dtype="float32"): pad_np[:, k * s_w : k * s_w + k_w, :], axis=width_axis ) else: - raise ValueError("pool type {} is not supported".format(pool_type)) + raise ValueError(f"pool type {pool_type} is not supported") ret_np = np.maximum(ret_np, fill_value) return ret_np, [out_shape] diff --git a/test/cinn/test_matmul.py b/test/cinn/test_matmul.py index 8deac2bb701..50266941d87 100755 --- a/test/cinn/test_matmul.py +++ b/test/cinn/test_matmul.py @@ -63,7 +63,7 @@ class TestMamul(unittest.TestCase): def create_matmul_basic(target, m, n, k): - m, n, k = [ir.Expr(_) for _ in (m, n, k)] + m, n, k = (ir.Expr(_) for _ in (m, n, k)) a = lang.Placeholder("float32", "A", [m, k]) b = lang.Placeholder("float32", "B", [k, n]) @@ -90,7 +90,7 @@ def create_matmul_basic(target, m, n, k): def create_matmul_tile(target, m, n, k): - m, n, k = [ir.Expr(_) for _ in [m, n, k]] + m, n, k = (ir.Expr(_) for _ in [m, n, k]) a = lang.Placeholder("float32", "A", [m, k]) b = lang.Placeholder("float32", "B", [k, n]) diff --git a/test/cinn/test_packed_func.py b/test/cinn/test_packed_func.py index 917097a533a..4525eab37ec 100755 --- a/test/cinn/test_packed_func.py +++ b/test/cinn/test_packed_func.py @@ -43,7 +43,7 @@ class TestPackedFunc(unittest.TestCase): self.assertEqual(mul(4, 5), 20) def test_callable_object(self): - class Accumulator(object): + class Accumulator: def __init__(self, init): self.init = init diff --git a/test/cinn/test_paddle_model_convertor.py b/test/cinn/test_paddle_model_convertor.py index a408717742b..8d8313807a2 100644 --- a/test/cinn/test_paddle_model_convertor.py +++ b/test/cinn/test_paddle_model_convertor.py @@ -165,9 +165,9 @@ class TestPaddleModel(OpMapperTest): return_numpy=True, ) - logger.debug(msg="Program:\n{}".format(self.inference_program)) - logger.debug(msg="Param List: {}".format(self.param_vars.keys())) - logger.debug(msg="Feed List: {}".format(self.feed_names)) + logger.debug(msg=f"Program:\n{self.inference_program}") + logger.debug(msg=f"Param List: {self.param_vars.keys()}") + logger.debug(msg=f"Feed List: {self.feed_names}") logger.debug( msg="Fetch List: {}".format( [var.name for var in self.fetch_targets] @@ -195,7 +195,7 @@ class TestPaddleModel(OpMapperTest): fetch_list=self.fetch_targets, return_numpy=True, ) - logger.debug("Paddle Result:\n{}".format(self.paddle_outputs)) + logger.debug(f"Paddle Result:\n{self.paddle_outputs}") def build_cinn_program(self, target): self.assertEqual( @@ -237,9 +237,7 @@ class TestPaddleModel(OpMapperTest): # get cinn input list inputs = prog.get_inputs() - logger.debug( - "CINN Input List: {}".format([var.name() for var in inputs]) - ) + logger.debug(f"CINN Input List: {[var.name() for var in inputs]}") self.assertEqual( len(feed_with_param), len(inputs), @@ -284,7 +282,7 @@ class TestPaddleModel(OpMapperTest): prog, target, cinn_inputs, cinn_feed_datas, cinn_output, passes=[] ) - logger.debug("CINN Result:\n{}".format(self.cinn_outputs)) + logger.debug(f"CINN Result:\n{self.cinn_outputs}") def test_check_results(self): # TODO(6clc): There is a random accuracy problem, diff --git a/test/cinn/test_pe_elementwise.py b/test/cinn/test_pe_elementwise.py index a8a52fa285a..2f17231d31f 100644 --- a/test/cinn/test_pe_elementwise.py +++ 
b/test/cinn/test_pe_elementwise.py @@ -113,13 +113,13 @@ class TestPEElementwise(unittest.TestCase): is_round=False, is_bool=False, ): - m, n = [ + m, n = ( ir.Expr(_) for _ in ( self.m, self.n, ) - ] + ) x = lang.Placeholder(dtype, "x", [m, n]) y = cinn_fn(x.to_tensor()) diff --git a/test/cinn/test_pe_reduction.py b/test/cinn/test_pe_reduction.py index 94698349688..386f6faa841 100644 --- a/test/cinn/test_pe_reduction.py +++ b/test/cinn/test_pe_reduction.py @@ -100,13 +100,13 @@ class TestPEReduction(unittest.TestCase): self.reduction_tester(fn_name, pe_fn, np_fn, [1], False) def reduction_tester(self, fn_name, cinn_fn, np_fn, axes, keep_dims): - m, n = [ + m, n = ( ir.Expr(_) for _ in ( self.m, self.n, ) - ] + ) x = lang.Placeholder("float32", "x", [m, n]) func_name = "test_" + fn_name y = cinn_fn(x.to_tensor(), axes, keep_dims) diff --git a/test/cinn/test_pe_transform.py b/test/cinn/test_pe_transform.py index becf4aa02e2..8b08068f7e1 100644 --- a/test/cinn/test_pe_transform.py +++ b/test/cinn/test_pe_transform.py @@ -53,14 +53,14 @@ class TestPETransform(unittest.TestCase): def transform_matmul_tester( self, fn_name, cinn_fn, np_fn, trans_a, trans_b, alpha ): - m, n, k = [ + m, n, k = ( ir.Expr(_) for _ in ( self.m, self.n, self.k, ) - ] + ) x_shape_expr = [k, m] if trans_a else [m, k] y_shape_expr = [n, k] if trans_b else [k, n] x = lang.Placeholder("float32", "x", x_shape_expr) diff --git a/tools/cinn/gen_c++_tutorial.py b/tools/cinn/gen_c++_tutorial.py index 16132de0999..08f66411642 100644 --- a/tools/cinn/gen_c++_tutorial.py +++ b/tools/cinn/gen_c++_tutorial.py @@ -28,7 +28,7 @@ import sys from typing import List -class Markdown(object): +class Markdown: ''' A simple markdown generator. ''' @@ -92,7 +92,7 @@ class Mark: roc = "@ROC" -class ContentGenerator(object): +class ContentGenerator: ''' Interface for some content passed into the parser. ''' @@ -104,7 +104,7 @@ class ContentGenerator(object): pass -class Parser(object): +class Parser: DOC_COMMENT_PREFIX = "//!" 
def __init__(self): diff --git a/tools/cinn/paddle_benchmark/paddle_test_benchmark.py b/tools/cinn/paddle_benchmark/paddle_test_benchmark.py index 1ce6ce4e9ff..ff7f4d5bd72 100755 --- a/tools/cinn/paddle_benchmark/paddle_test_benchmark.py +++ b/tools/cinn/paddle_benchmark/paddle_test_benchmark.py @@ -49,7 +49,7 @@ def main(): predictor.zero_copy_run() time2 = time.time() total_inference_cost = (time2 - time1) * 1000 # total time cost(ms) - print("Average latency : {} ms".format(total_inference_cost / repeat)) + print(f"Average latency : {total_inference_cost / repeat} ms") output_names = predictor.get_output_names() output_tensor = predictor.get_output_tensor(output_names[0]) output_data = output_tensor.copy_to_cpu() diff --git a/tools/cinn/tvm_benchmark/tvm_graph_with_single_op.py b/tools/cinn/tvm_benchmark/tvm_graph_with_single_op.py index 664970bc193..79360ed01ab 100755 --- a/tools/cinn/tvm_benchmark/tvm_graph_with_single_op.py +++ b/tools/cinn/tvm_benchmark/tvm_graph_with_single_op.py @@ -240,14 +240,16 @@ def tune_and_evaluate(func): np.array(evaluator_preheat().results) * 1000 ) # convert to millisecond print( - "[PreHeat]Mean inference time (std dev): %.4f ms (%.4f ms)" - % (np.mean(prof_res1), np.std(prof_res1)) + "[PreHeat]Mean inference time (std dev): {:.4f} ms ({:.4f} ms)".format( + np.mean(prof_res1), np.std(prof_res1) + ) ) prof_res2 = np.array(evaluator().results) * 1000 # convert to millisecond print( - "[Benchmark]Mean inference time (std dev): %.4f ms (%.4f ms)" - % (np.mean(prof_res2), np.std(prof_res2)) + "[Benchmark]Mean inference time (std dev): {:.4f} ms ({:.4f} ms)".format( + np.mean(prof_res2), np.std(prof_res2) + ) ) -- GitLab
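For reference, the pyproject.toml hunk above removes the per-directory ignores for several pyupgrade rules (UP004, UP008, UP027, UP031, UP032, UP034), and the file diffs apply the matching rewrites. Below is a minimal sketch of what each rule enforces; the class and function names are hypothetical examples, not code from this patch.

```python
# Illustrative sketch of the pyupgrade (UP) rewrites enforced by this patch.
# Names below are hypothetical, not taken from the CINN sources.

# UP004: omit the explicit `object` base class in Python 3.
class CostEstimator:                      # was: class CostEstimator(object):
    def __init__(self, scale):
        self.scale = scale


class GpuCostEstimator(CostEstimator):
    def __init__(self, scale):
        # UP008: `super()` takes no arguments inside a method.
        super().__init__(scale)           # was: super(GpuCostEstimator, self).__init__(scale)

    def describe(self, latency_ms):
        # UP031 / UP032: prefer f-strings over %-formatting and str.format().
        return f"latency: {latency_ms:.4f} ms"   # was: "latency: %.4f ms" % latency_ms

    def bounds(self, values):
        # UP027 / UP034: unpack from a generator expression and drop
        # redundant parentheses instead of building a throwaway list.
        lo, hi = (self.scale * v for v in values)   # was: [self.scale * v for v in values]
        return lo, hi
```

With these rewrites in place, the UP rules can stay enabled for paddle/cinn, python/cinn, test/cinn, and tools/cinn without the previous suppressions.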