From 403e811051ef02cb3f6def0c6e4617c4a95d7a1a Mon Sep 17 00:00:00 2001
From: lvwenyuan 00536823
Date: Tue, 4 Aug 2020 16:13:22 +0800
Subject: [PATCH] Add env vars to control dumping of GPU IR and CUDA code

---
 python/akg/composite/build_module.py | 3 +-
 python/akg/ms/message.py | 5 +-
 python/akg/ms/op_build.py | 2 +-
 python/akg/ops/nn/maxpool.py | 2 +-
 python/akg/ops/nn/maxpool_ad.py | 4 +-
 python/akg/utils/dump_cuda_meta.py | 14 +++-
 python/akg/utils/kernel_exec.py | 71 ++++++++++++-------
 src/codegen/build_module.cc | 4 +-
 tests/common/test_op/add_a_conv.py | 2 +-
 tests/common/test_op/add_b_conv.py | 2 +-
 tests/common/test_op/col2im_compute.py | 2 +-
 tests/common/test_op/gather.py | 2 +-
 tests/common/test_op/im2col_compute.py | 2 +-
 tests/common/test_op/reduce_max_ad.py | 2 +-
 tests/common/test_op/reduce_min_ad.py | 2 +-
 tests/common/test_op/vector_matmul.py | 2 +-
 tests/common/test_run/IOU_for_train_run.py | 2 +-
 tests/common/test_run/avgpool_ad_run.py | 4 +-
 .../test_run/bounding_box_encode_run.py | 4 +-
 tests/common/test_run/conv_filter_ad_run.py | 2 +-
 tests/common/test_run/conv_run_mansch.py | 2 +-
 .../distr_bernoulli_logprob_ad_run.py | 2 +-
 .../test_run/distr_bernoulli_logprob_run.py | 2 +-
 .../distr_normal_diag_KLdiv_ad_run.py | 2 +-
 .../test_run/distr_normal_diag_KLdiv_run.py | 2 +-
 .../distr_normal_diag_logprob_ad_run.py | 2 +-
 .../test_run/distr_normal_diag_logprob_run.py | 2 +-
 .../distr_normal_diag_sample_ad_run.py | 2 +-
 .../test_run/distr_normal_diag_sample_run.py | 2 +-
 .../distr_normal_prob_regr_train_run.py | 2 +-
 tests/common/test_run/dropout_run.py | 2 +-
 tests/common/test_run/kldiv_loss_grad_run.py | 4 +-
 tests/common/test_run/l1_loss_grad_run.py | 4 +-
 tests/common/test_run/matmul_run_mansch.py | 2 +-
 tests/common/test_run/maxpool_ad_run.py | 6 +-
 tests/common/test_run/maxpool_grad_run.py | 4 +-
 .../test_run/maxpool_grad_with_argmax_run.py | 4 +-
 tests/common/test_run/nms_run.py | 2 +-
 tests/common/test_run/roipool_run.py | 2 +-
 .../test_run/smooth_l1_loss_grad_run.py | 4 +-
 tests/common/test_run/square_difference_run.py | 2 +-
 .../common/test_run/strided_slice_grad_run.py | 2 +-
 tests/common/test_run/truncatemod_run.py | 4 +-
 tests/common/test_run/vector_matmul_run.py | 2 +-
 tests/common/test_run/winograd_ad_run.py | 4 +-
 tests/operators/cube/quant_conv.py | 2 +-
 46 files changed, 118 insertions(+), 85 deletions(-)

diff --git a/python/akg/composite/build_module.py b/python/akg/composite/build_module.py
index a54d99a..dfa9428 100644
--- a/python/akg/composite/build_module.py
+++ b/python/akg/composite/build_module.py
@@ -176,7 +176,8 @@ def build_cuda(outputs, args, sch_name, kernel_name):
     }
     with tvm.target.cuda() as cuda:
         s = scheduler[sch_name](outputs)
-        with tvm.build_config(dump_pass_ir = True):
+        dump_ir = os.getenv('MS_AKG_DUMP_IR') == "on"
+        with tvm.build_config(dump_pass_ir = dump_ir):
             mod = tvm.build(s, args, cuda, name = kernel_name)
             dump_cuda_meta.dump(mod, kernel_name, s, list(args))
     return mod
diff --git a/python/akg/ms/message.py b/python/akg/ms/message.py
index 8ed443f..94e533a 100644
--- a/python/akg/ms/message.py
+++ b/python/akg/ms/message.py
@@ -82,7 +82,10 @@ def compilewithjson_to_func(json_str):
         if kernel_info['attr']:
             for ext_arg in kernel_info['attr']:
                 op_attrs.append(ext_arg['value'])
-        mod = utils.op_build(op_func, input_shapes, input_types, op_attrs, kernel_info['op'])
+        dump_ir = os.getenv('MS_AKG_DUMP_IR') == "on"
+        dump_code = os.getenv('MS_AKG_DUMP_CODE') == "on"
+        mod = utils.op_build(op_func, input_shapes, input_types, op_attrs,
+                             kernel_info['op'], dump_ir=dump_ir, dump_code=dump_code)
         return True
     else:
         op_func = getattr(cce, op_name, None)
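Note: both dumps are opt-in and keyed on the exact string "on"; any other
value (or an unset variable) leaves them off. A minimal sketch of driving
this from the caller's side (the kernel JSON itself is elided):

    import os

    # Must be set before the kernel is compiled; read back via os.getenv()
    # in build_cuda() and compilewithjson_to_func() above.
    os.environ['MS_AKG_DUMP_IR'] = "on"    # enable pass-IR dumping
    os.environ['MS_AKG_DUMP_CODE'] = "on"  # enable CCE/CUDA source dumping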
diff --git a/python/akg/ms/op_build.py b/python/akg/ms/op_build.py
index 0e16289..345f3cd 100644
--- a/python/akg/ms/op_build.py
+++ b/python/akg/ms/op_build.py
@@ -31,7 +31,7 @@ from akg.utils import validation_check as vc_util
 
 BINDS = "binds"
 MS_AKG_DUMP_IR = "MS_AKG_DUMP_IR"
-MS_AKG_DUMP_CCE = "MS_AKG_DUMP_CCE"
+MS_AKG_DUMP_CODE = "MS_AKG_DUMP_CODE"
 MS_DAVINCI_KERNEL_PATH = "./kernel_meta/"
 
 
diff --git a/python/akg/ops/nn/maxpool.py b/python/akg/ops/nn/maxpool.py
index 00f4aad..b91456f 100644
--- a/python/akg/ops/nn/maxpool.py
+++ b/python/akg/ops/nn/maxpool.py
@@ -294,7 +294,7 @@ def maxpool_manual_schedule(shape, kernel, stride, padding, dtype, attrs=None, p
     mod = akg.build(s, [data, res], "cce", name="maxpool_manual_schedule", attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "maxpool_ad_manual_schedule"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod
 
 def pad_strategy_check(strategy):
diff --git a/python/akg/ops/nn/maxpool_ad.py b/python/akg/ops/nn/maxpool_ad.py
index c5a102d..0b1f66a 100644
--- a/python/akg/ops/nn/maxpool_ad.py
+++ b/python/akg/ops/nn/maxpool_ad.py
@@ -387,7 +387,7 @@ def maxpool_ad_manual_schedule_all_max(shape, kernel, stride, pad, dtype, polyhe
                     attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "maxpool_ad_manual_schedule_all_max"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod
 
 
@@ -489,5 +489,5 @@ def maxpool_ad_manual_schedule_no_overlap_all_max(shape, kernel, stride, pad, dt
                     name="maxpool_ad_manual_schedule_no_overlap_all_max", attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "maxpool_ad_manual_schedule_no_overlap_all_max"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod
diff --git a/python/akg/utils/dump_cuda_meta.py b/python/akg/utils/dump_cuda_meta.py
index 358e12b..e518b89 100644
--- a/python/akg/utils/dump_cuda_meta.py
+++ b/python/akg/utils/dump_cuda_meta.py
@@ -64,7 +64,18 @@ def save_gpu_params(s, args, kernel_info):
     ptx_code = kernel_info[0]
     file_name = kernel_info[1]
     kernel_name = kernel_info[2]
-    ir = str(akg.tvm.lower(s, args, simple_mode=True))
+
+    dump_ir = os.getenv('MS_AKG_DUMP_IR') == "on"
+    ir = str(akg.tvm.lower(s, args, simple_mode=True))
+    if dump_ir:
+        # Pass IR was dumped during the build; prefer the pre-pass IR from the "00_" file.
+        schedule_path = os.path.realpath(kernel_name)
+        all_passes = os.listdir(schedule_path)
+        for cur_pass in all_passes:
+            if cur_pass.startswith("00_"):
+                with open(schedule_path + '/' + cur_pass, "r") as file:
+                    ir = file.read()
+                break
     file_path = os.path.realpath(file_name)
     if os.path.exists(file_path):
         os.remove(file_path)
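Note: with dump_pass_ir enabled, the build writes one IR file per lowering
pass into a directory named after the kernel, prefixed with the pass index,
so "00_" marks the IR before any pass has run. The lookup in save_gpu_params
amounts to this standalone sketch (directory and file names are illustrative;
the exact names depend on the passes TVM runs):

    import os

    def read_initial_ir(kernel_name):
        # e.g. ./my_kernel/00_..., ./my_kernel/01_..., written by dump_pass_ir
        dump_dir = os.path.realpath(kernel_name)
        for entry in os.listdir(dump_dir):
            if entry.startswith("00_"):
                with open(os.path.join(dump_dir, entry), "r") as f:
                    return f.read()
        return None  # no dump present; fall back to akg.tvm.lower()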
""" - if cce_path: - if len(cce_path) > 4 and cce_path[-4:].lower() == ".cce": - real_path = cce_path + if code_type == "CCE": + postfix = ".cce" + elif code_type == "CUDA": + postfix = ".cu" + else: + logging.info("the target code type %s is not supported.", code_type) + + if not code_path: + code_path = "./" + + if code_type == "CCE" and len(code_path) > 4 and code_path[-4:].lower() == postfix: + real_path = code_path + elif code_type == "CUDA" and len(code_path) > 3 and code_path[-3:].lower() == postfix: + real_path = code_path + else: + if code_path[-1] == r"/": + real_path = code_path + kernel_name + postfix else: - if cce_path[-1] == r"/": - real_path = cce_path + kernel_name + ".cce" - else: - real_path = cce_path + r"/" + kernel_name + ".cce" - dir_path = r"/".join(real_path.split(r"/")[:-1]) - if not os.path.isdir(dir_path): - os.makedirs(dir_path) + real_path = code_path + r"/" + kernel_name + postfix + dir_path = r"/".join(real_path.split(r"/")[:-1]) + if not os.path.isdir(dir_path): + os.makedirs(dir_path) + + with open(real_path, 'wt') as ss: + ss.write(code) - with open(real_path, 'wt') as ss: - ss.write(code) def gen_name_kernel(kernel, dtype, shapes): @@ -538,7 +551,7 @@ def gen_kernel_name(input_shapes, input_types, op_attrs=None, kernel_name=""): @func_time_required def op_build_test(op_func, input_shapes, input_types, op_attrs=None, kernel_name="", - attrs=None, log_cce=False, dump_ir=True, dump_cce=True, + attrs=None, log_cce=False, dump_ir=True, dump_code=True, polyhedral=True, tuning=False): """ Return module from op_build with given inputs, distinguish tuning mode. @@ -552,7 +565,7 @@ def op_build_test(op_func, input_shapes, input_types, op_attrs=None, kernel_name attrs (dict): tiling parameter. log_cce (bool): False by default. dump_ir (bool): True by default. - dump_cce (bool): False by default. + dump_code (bool): False by default. polyhedral (bool): True by default. tuning (bool): False by default. @@ -565,7 +578,7 @@ def op_build_test(op_func, input_shapes, input_types, op_attrs=None, kernel_name kernel_name = gen_kernel_name(input_shapes, input_types, op_attrs, kernel_name) logging.debug('kernel_name---------- %s', str(kernel_name)) mod = op_build(op_func, input_shapes, input_types, op_attrs, kernel_name, - attrs, log_cce, dump_ir, dump_cce, + attrs, log_cce, dump_ir, dump_code, polyhedral, tuning) return mod @@ -593,7 +606,7 @@ def recursive_copy(obj): def op_build(op_func, input_shapes, input_types, op_attrs=None, kernel_name="", - attrs=None, log_cce=False, dump_ir=True, dump_cce=True, + attrs=None, log_cce=False, dump_ir=True, dump_code=True, polyhedral=True, tuning=False): """ Return module built from op_func with given inputs. @@ -607,7 +620,7 @@ def op_build(op_func, input_shapes, input_types, op_attrs=None, kernel_name="", attrs (dict): tiling parameter. log_cce (bool): False by default. dump_ir (bool): True by default. - dump_cce (bool): False by default. + dump_code (bool): False by default. polyhedral (bool): True by default. tuning (bool): False by default. 
@@ -538,7 +551,7 @@ def gen_kernel_name(input_shapes, input_types, op_attrs=None, kernel_name=""):
 
 @func_time_required
 def op_build_test(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
-                  attrs=None, log_cce=False, dump_ir=True, dump_cce=True,
+                  attrs=None, log_cce=False, dump_ir=True, dump_code=True,
                   polyhedral=True, tuning=False):
     """
     Return module from op_build with given inputs, distinguish tuning mode.
@@ -552,7 +565,7 @@ def op_build_test(op_func, input_shapes, input_types, op_attrs=None, kernel_name
         attrs (dict): tiling parameter.
         log_cce (bool): False by default.
         dump_ir (bool): True by default.
-        dump_cce (bool): False by default.
+        dump_code (bool): True by default.
         polyhedral (bool): True by default.
         tuning (bool): False by default.
 
@@ -565,7 +578,7 @@ def op_build_test(op_func, input_shapes, input_types, op_attrs=None, kernel_name
     kernel_name = gen_kernel_name(input_shapes, input_types, op_attrs, kernel_name)
     logging.debug('kernel_name---------- %s', str(kernel_name))
     mod = op_build(op_func, input_shapes, input_types, op_attrs, kernel_name,
-                   attrs, log_cce, dump_ir, dump_cce,
+                   attrs, log_cce, dump_ir, dump_code,
                    polyhedral, tuning)
     return mod
 
@@ -593,7 +606,7 @@ def recursive_copy(obj):
 
 
 def op_build(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
-             attrs=None, log_cce=False, dump_ir=True, dump_cce=True,
+             attrs=None, log_cce=False, dump_ir=True, dump_code=True,
              polyhedral=True, tuning=False):
     """
     Return module built from op_func with given inputs.
@@ -607,7 +620,7 @@ def op_build(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
         attrs (dict): tiling parameter.
         log_cce (bool): False by default.
         dump_ir (bool): True by default.
-        dump_cce (bool): False by default.
+        dump_code (bool): True by default.
         polyhedral (bool): True by default.
         tuning (bool): False by default.
 
@@ -730,9 +743,13 @@ def op_build(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
         kernel_name = kernel_name if kernel_name != "" else sch_tmpl['op_name']
         with akg.tvm.target.cuda() as target:
             s = sch_tmpl['schedule'](sch_tmpl['output'])
-            with akg.build_config(dump_pass_ir=True):
-                mod = akg.build(s, op_var, "cuda", shape_var, name=kernel_name, attrs=attrs, polyhedral=polyhedral, binds=binds)
+            with akg.tvm.build_config(dump_pass_ir=dump_ir):
+                mod = akg.build(s, op_var, "cuda", shape_var, name=kernel_name, attrs=attrs,
+                                polyhedral=polyhedral, binds=binds)
                 dump_cuda_meta.dump(mod, kernel_name, s, op_var)
+                if dump_code:
+                    source_code = mod.imported_modules[0].get_source()
+                    create_code(kernel_name, "./", source_code, "CUDA")
             return mod
 
     if isinstance(output, (list, tuple)):
@@ -781,9 +798,9 @@ def op_build(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
         if log_cce:
             logging.debug("#################cce code####################")
             logging.debug(source_code)
-        if dump_cce:
-            cce_path = "./"
-            create_cce(kernel_name, cce_path, source_code)
+        if dump_code:
+            code_path = "./"
+            create_code(kernel_name, code_path, source_code)
 
     return mod
 
diff --git a/src/codegen/build_module.cc b/src/codegen/build_module.cc
index 1f8bfd2..e1c258f 100644
--- a/src/codegen/build_module.cc
+++ b/src/codegen/build_module.cc
@@ -1110,8 +1110,8 @@ air::runtime::Module BuildToModule(const NodeRef &ref, const std::string &target
     mhost.Import(mdev);
   }
 
-  const char *akg_dump_cce = getenv("MS_AKG_DUMP_CCE");
-  if (akg_dump_cce != nullptr) {
+  const char *akg_dump_code = getenv("MS_AKG_DUMP_CODE");
+  if (akg_dump_code != nullptr) {
     auto mod0 = mhost->imports()[0];
     CHECK(mod0.defined());
 
diff --git a/tests/common/test_op/add_a_conv.py b/tests/common/test_op/add_a_conv.py
index 85a0067..aec9a68 100644
--- a/tests/common/test_op/add_a_conv.py
+++ b/tests/common/test_op/add_a_conv.py
@@ -207,7 +207,7 @@ def add_a_conv(fmap_shape, filter_shape, pad_, stride_, dilation_,
     mod = akg.build(s, [a_value, b_value, conv], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
     source_code = mod.imported_modules[0].get_source()
     cce_path = '.'
-    utils.create_cce(kernel_name, cce_path, source_code)
+    utils.create_code(kernel_name, cce_path, source_code)
 
     return mod
diff --git a/tests/common/test_op/add_b_conv.py b/tests/common/test_op/add_b_conv.py
index de56431..fbdd51e 100644
--- a/tests/common/test_op/add_b_conv.py
+++ b/tests/common/test_op/add_b_conv.py
@@ -201,7 +201,7 @@ def add_b_conv(fmap_shape, filter_shape, pad_, stride_, dilation_,
     mod = akg.build(s, [a_value, b_value, conv], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
     source_code = mod.imported_modules[0].get_source()
     cce_path = '.'
-    utils.create_cce(kernel_name, cce_path, source_code)
+    utils.create_code(kernel_name, cce_path, source_code)
 
     return mod
diff --git a/tests/common/test_op/col2im_compute.py b/tests/common/test_op/col2im_compute.py
index 184275f..63a5451 100644
--- a/tests/common/test_op/col2im_compute.py
+++ b/tests/common/test_op/col2im_compute.py
@@ -64,5 +64,5 @@ def col2im_manual_schedule(shape, kernel, stride, pad, dtype, output_H_W, polyhe
     mod = akg.build(s, [data, res], "cce", name="col2im_manual_schedule", attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "col2im_manual_schedule"
-    utils.create_cce(kernel_name, "./", source_code)
+    utils.create_code(kernel_name, "./", source_code)
     return mod
diff --git a/tests/common/test_op/gather.py b/tests/common/test_op/gather.py
index 0c4f315..adbb88a 100644
--- a/tests/common/test_op/gather.py
+++ b/tests/common/test_op/gather.py
@@ -115,6 +115,6 @@ def gather(params_shape, indices_shape, params_dtype, indices_dtype, axis, kerne
 
         mod = akg.build(s, [xx, yy, res], "cce", name=kernel_name, attrs=attrs)
         source_code = mod.imported_modules[0].get_source()
-        utils.create_cce(kernel_name, cce_path, source_code)
+        utils.create_code(kernel_name, cce_path, source_code)
 
         return mod
diff --git a/tests/common/test_op/im2col_compute.py b/tests/common/test_op/im2col_compute.py
index b17a0b7..569ada8 100644
--- a/tests/common/test_op/im2col_compute.py
+++ b/tests/common/test_op/im2col_compute.py
@@ -109,5 +109,5 @@ def im2col_manual_schedule(shape, kernel, stride, pad, dtype, polyhedral=True, a
                     attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "im2col_manual_schedule"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod
diff --git a/tests/common/test_op/reduce_max_ad.py b/tests/common/test_op/reduce_max_ad.py
index ab6ad0e..78a86fc 100644
--- a/tests/common/test_op/reduce_max_ad.py
+++ b/tests/common/test_op/reduce_max_ad.py
@@ -200,5 +200,5 @@ def reduce_max_ad_optimized_manual_schedule(input_shape, dtype, axis, keepdims,
                     name="reduce_max_ad_manual_schedule", attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "reduce_max_ad_manual_schedule"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod
diff --git a/tests/common/test_op/reduce_min_ad.py b/tests/common/test_op/reduce_min_ad.py
index c2f7628..9d4ae08 100644
--- a/tests/common/test_op/reduce_min_ad.py
+++ b/tests/common/test_op/reduce_min_ad.py
@@ -159,5 +159,5 @@ def reduce_min_ad_optimized_manual_schedule(input_shape, dtype, axis, keepdims,
                     attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "reduce_min_ad_manual_schedule"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod
diff --git a/tests/common/test_op/vector_matmul.py b/tests/common/test_op/vector_matmul.py
index c9cbaaa..397a73f 100644
--- a/tests/common/test_op/vector_matmul.py
+++ b/tests/common/test_op/vector_matmul.py
@@ -105,5 +105,5 @@ def vector_matmul(data_m, data_n, data_k, trans_a, trans_b, dtype, kernel_name,
     with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
         mod = akg.build(forward_s, op_vars, "cce", name=kernel_name, attrs=attrs, polyhedral=True)
     source_code = mod.imported_modules[0].get_source()
-    utils.create_cce(kernel_name, "./", source_code)
+    utils.create_code(kernel_name, "./", source_code)
     return mod, output_shape
diff --git a/tests/common/test_run/IOU_for_train_run.py b/tests/common/test_run/IOU_for_train_run.py
index a9d5325..727dce5 100644
--- a/tests/common/test_run/IOU_for_train_run.py
+++ b/tests/common/test_run/IOU_for_train_run.py
@@ -81,7 +81,7 @@ def iou_for_train_run(shape_tensor,
     output = utils.mod_launch(mod, (anchor, ground_truth, output), expect=expect)
 
     source_code = mod.imported_modules[0].get_source()
-    utils.create_cce(kernel_name, "./", source_code)
+    utils.create_code(kernel_name, "./", source_code)
 
     return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
diff --git a/tests/common/test_run/avgpool_ad_run.py b/tests/common/test_run/avgpool_ad_run.py
index 3b36fb3..7c7c916 100644
--- a/tests/common/test_run/avgpool_ad_run.py
+++ b/tests/common/test_run/avgpool_ad_run.py
@@ -41,7 +41,7 @@ def avgpool_ad_run(shape, kernel, stride, pad, dtype, polyhedral=False, attrs=No
         input = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
         y = avgpool_run.benchmark(input, kernel, stride, pad)
         mod = utils.op_build_test(avgpool, [y.shape, shape], [dtype, dtype], op_attrs=[kernel, stride, pad],
-                                  kernel_name=kernel_name, attrs=attrs, log_cce=True, dump_cce=True, tuning=t)
+                                  kernel_name=kernel_name, attrs=attrs, log_cce=True, dump_code=True, tuning=t)
         if t:
             expect, head, output = gen_data(dtype, input, kernel, pad, stride, support_list, y)
             return mod, expect, (head, input, output)
@@ -51,7 +51,7 @@ def avgpool_ad_run(shape, kernel, stride, pad, dtype, polyhedral=False, attrs=No
         input = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
         y = avgpool_run.benchmark(input, kernel, stride, pad)
         mod = utils.op_build_test(avgpool, [y.shape, shape], [dtype, dtype], op_attrs=[kernel, stride, pad],
-                                  kernel_name=kernel_name, attrs=attrs, log_cce=True, dump_cce=True)
+                                  kernel_name=kernel_name, attrs=attrs, log_cce=True, dump_code=True)
         expect, head, output = gen_data(dtype, input, kernel, pad, stride, support_list, y)
         output = utils.mod_launch(mod, [head, input, output], expect=expect)
 
diff --git a/tests/common/test_run/bounding_box_encode_run.py b/tests/common/test_run/bounding_box_encode_run.py
index 17720ee..d076aca 100644
--- a/tests/common/test_run/bounding_box_encode_run.py
+++ b/tests/common/test_run/bounding_box_encode_run.py
@@ -197,7 +197,7 @@ def bounding_box_encode_run(anchor_box_shape, groundtruth_box_shape, anchor_samp
         mod = utils.op_build_test(bounding_box_encode.bouding_box_encode,
                                   [anchor_box_shape, groundtruth_box_shape, anchor_samples_shape],
                                   [dtype, dtype, "int32"],
-                                  op_attrs, kernel_name=kernel_name, attrs=attrs, dump_cce=True, tuning=t)
+                                  op_attrs, kernel_name=kernel_name, attrs=attrs, dump_code=True, tuning=t)
         if t:
             anchor_box_data, anchor_samples_data, expect, groundtruth_box_data, output_data = gen_data(anchor_box_shape,
                                                                                                        anchor_samples_shape,
@@ -211,7 +211,7 @@ def bounding_box_encode_run(anchor_box_shape, groundtruth_box_shape, anchor_samp
         mod = utils.op_build_test(bounding_box_encode.bouding_box_encode,
                                   [anchor_box_shape, groundtruth_box_shape, anchor_samples_shape],
                                   [dtype, dtype, "int32"],
-                                  op_attrs, kernel_name=kernel_name, attrs=attrs, dump_cce=True)
+                                  op_attrs, kernel_name=kernel_name, attrs=attrs, dump_code=True)
         anchor_box_data, anchor_samples_data, expect, groundtruth_box_data, output_data = gen_data(anchor_box_shape,
                                                                                                    anchor_samples_shape,
                                                                                                    dtype, epsilon,
diff --git a/tests/common/test_run/conv_filter_ad_run.py b/tests/common/test_run/conv_filter_ad_run.py
index 04f636a..3bd5e37 100644
--- a/tests/common/test_run/conv_filter_ad_run.py
+++ b/tests/common/test_run/conv_filter_ad_run.py
@@ -192,7 +192,7 @@ def conv_filter_ad_run(fmap_shape, filter_shape, pad_, stride_, dilation_, attr
             return np_input, out_data, expect, True
 
     mod = utils.op_build_test(conv_filter_ad.conv_filter_ad, [dw_input_shapes], [conv_dtype],
-                              op_attrs=[fmap_shape, filter_shape, pad_, stride_, dilation_], kernel_name='conv_filter_ad', attrs=attrs, dump_cce = True)
+                              op_attrs=[fmap_shape, filter_shape, pad_, stride_, dilation_], kernel_name='conv_filter_ad', attrs=attrs, dump_code = True)
     args = (dy_data, dx_data, out_data)
     out_data = utils.mod_launch(mod, args, expect=expect)
     rtol, atol = get_rtol_atol("conv_filter_ad", conv_dtype)
diff --git a/tests/common/test_run/conv_run_mansch.py b/tests/common/test_run/conv_run_mansch.py
index d9d221d..6795678 100644
--- a/tests/common/test_run/conv_run_mansch.py
+++ b/tests/common/test_run/conv_run_mansch.py
@@ -34,7 +34,7 @@ def conv_run_mansch(FMap_shape, Filter_shape, Pad, Stride, Dilation=None, use_bi
                                 use_bias=use_bias, fp32_mad=fp32_mad, kernel_name="conv_mansch")
 
     source_code = mod.imported_modules[0].get_source()
-    utils.create_cce("conv_mansch", ".", source_code)
+    utils.create_code("conv_mansch", ".", source_code)
 
     A, B, bias_data, expect = gen_data(FMap_shape, Filter_shape, Pad, Stride, Dilation, use_bias)
     expect = expect.reshape((expect.shape[0], expect.shape[1], expect.shape[2]*expect.shape[3],expect.shape[4]))  # output on conv2d is in 4d format
diff --git a/tests/common/test_run/distr_bernoulli_logprob_ad_run.py b/tests/common/test_run/distr_bernoulli_logprob_ad_run.py
index 7129e4b..064d9a7 100644
--- a/tests/common/test_run/distr_bernoulli_logprob_ad_run.py
+++ b/tests/common/test_run/distr_bernoulli_logprob_ad_run.py
@@ -26,7 +26,7 @@ def logprob_ad_run(shape, dtype, kernel_name="", attrs=None):
     mod = utils.op_build_test(distr_bernoulli_logprob_ad.bernoulli_logprob_ad,
                               [head.shape, x.shape, probs.shape],
                               [dtype, dtype, dtype], kernel_name=kernel_name,
-                              op_attrs=None, attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=None, attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     outputs = utils.mod_launch(mod, [head, x, probs, *outputs], outputs=tuple(range(-len(outputs), 0)),
                                expect=expects)
     outputs = list(outputs)
diff --git a/tests/common/test_run/distr_bernoulli_logprob_run.py b/tests/common/test_run/distr_bernoulli_logprob_run.py
index 72b2525..1c0a73a 100644
--- a/tests/common/test_run/distr_bernoulli_logprob_run.py
+++ b/tests/common/test_run/distr_bernoulli_logprob_run.py
@@ -29,7 +29,7 @@ def log_prob_run(shape, dtype, kernelname="", attrs = None):
 
     mod = utils.op_build_test(log_prob_op, [x.shape, probs.shape],
                               [dtype, dtype], kernel_name=kernelname,
-                              op_attrs=[], attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=[], attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     output = utils.mod_launch(mod, [x, probs, output], expect=expect)
 
     return (x, probs), output, expect, compare_tensor(output, expect, rtol=1e-03, atol=1e-03, equal_nan=True)
diff --git a/tests/common/test_run/distr_normal_diag_KLdiv_ad_run.py b/tests/common/test_run/distr_normal_diag_KLdiv_ad_run.py
index a20d409..e1c7cc5 100644
--- a/tests/common/test_run/distr_normal_diag_KLdiv_ad_run.py
+++ b/tests/common/test_run/distr_normal_diag_KLdiv_ad_run.py
@@ -25,7 +25,7 @@ def KLdiv_ad_run(shape, dtype, kernel_name="", attrs=None):
     mod = utils.op_build_test(distr_normal_diag_KLdiv_ad.normal_diag_KLdiv_ad,
                               [head.shape, mean.shape, scale.shape],
                               [dtype, dtype, dtype], kernel_name=kernel_name,
-                              op_attrs=None, attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=None, attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     outputs = utils.mod_launch(mod, [head, mean, scale, *outputs], outputs=tuple(range(-len(outputs), 0)),
                                expect=expects)
     outputs = list(outputs)
diff --git a/tests/common/test_run/distr_normal_diag_KLdiv_run.py b/tests/common/test_run/distr_normal_diag_KLdiv_run.py
index b668f4d..b2143b1 100644
--- a/tests/common/test_run/distr_normal_diag_KLdiv_run.py
+++ b/tests/common/test_run/distr_normal_diag_KLdiv_run.py
@@ -27,7 +27,7 @@ def KLdiv_run(shape, dtype, kernelname="", attrs = None):
 
     mod = utils.op_build_test(KLdiv_op, [mean.shape, scale.shape],
                               [dtype, dtype], kernel_name=kernelname,
-                              op_attrs=[], attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=[], attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     output = utils.mod_launch(mod, [mean, scale, output], expect = expect)
 
     return (mean, scale), output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
diff --git a/tests/common/test_run/distr_normal_diag_logprob_ad_run.py b/tests/common/test_run/distr_normal_diag_logprob_ad_run.py
index 90e2548..386108f 100644
--- a/tests/common/test_run/distr_normal_diag_logprob_ad_run.py
+++ b/tests/common/test_run/distr_normal_diag_logprob_ad_run.py
@@ -30,7 +30,7 @@ def logprob_ad_run(shape, dtype, kernel_name="", attrs=None):
         op_attrs=None,
         attrs=None,
         log_cce=True,
-        dump_cce=True,
+        dump_code=True,
         polyhedral=True,
     )
     outputs = utils.mod_launch(
diff --git a/tests/common/test_run/distr_normal_diag_logprob_run.py b/tests/common/test_run/distr_normal_diag_logprob_run.py
index ea00945..60bd45c 100644
--- a/tests/common/test_run/distr_normal_diag_logprob_run.py
+++ b/tests/common/test_run/distr_normal_diag_logprob_run.py
@@ -28,7 +28,7 @@ def logprob_run(shape, dtype, kernelname="", attrs = None):
 
     mod = utils.op_build_test(logprob_op, [x.shape, mean.shape, scale.shape],
                               [dtype, dtype, dtype], kernel_name=kernelname,
-                              op_attrs=[], attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=[], attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     output = utils.mod_launch(mod, [x, mean, scale, output], expect = expect)
 
     return (x, mean, scale), output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
diff --git a/tests/common/test_run/distr_normal_diag_sample_ad_run.py b/tests/common/test_run/distr_normal_diag_sample_ad_run.py
index ab03522..318db77 100644
--- a/tests/common/test_run/distr_normal_diag_sample_ad_run.py
+++ b/tests/common/test_run/distr_normal_diag_sample_ad_run.py
@@ -24,7 +24,7 @@ def sample_ad_run(shape, dtype, kernel_name="", attrs=None):
     mod = utils.op_build_test(distr_normal_diag_sample_ad.normal_diag_sample_ad,
                               [head.shape, mean.shape, scale.shape, eps.shape],
                               [dtype, dtype, dtype, dtype], kernel_name=kernel_name,
-                              op_attrs=None, attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=None, attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     outputs = utils.mod_launch(mod, [head, mean, scale, eps, *outputs], outputs=tuple(range(-len(outputs), 0)),
                                expect=expects)
     outputs = list(outputs)
diff --git a/tests/common/test_run/distr_normal_diag_sample_run.py b/tests/common/test_run/distr_normal_diag_sample_run.py
index 4154796..c321ff6 100644
--- a/tests/common/test_run/distr_normal_diag_sample_run.py
+++ b/tests/common/test_run/distr_normal_diag_sample_run.py
@@ -26,7 +26,7 @@ def sample_run(shape, dtype, kernel_name="", attrs=None):
     mod = utils.op_build_test(sample_op, [mean.shape, scale.shape, eps.shape],
                               [dtype, dtype, dtype], kernel_name=kernel_name,
-                              op_attrs=None, attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=None, attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     output = utils.mod_launch(mod, [mean, scale, eps, output], expect=expect)
 
     return (mean, scale, eps), output, expect, compare_tensor(output, expect, rtol=5e-03, atol=0.1, equal_nan=True)
diff --git a/tests/common/test_run/distr_normal_prob_regr_train_run.py b/tests/common/test_run/distr_normal_prob_regr_train_run.py
index 3fd9167..d6b30b1 100644
--- a/tests/common/test_run/distr_normal_prob_regr_train_run.py
+++ b/tests/common/test_run/distr_normal_prob_regr_train_run.py
@@ -25,7 +25,7 @@ def prob_regression_run(shape, dtype, kernel_name, attrs):
     mod = utils.op_build_test(distr_normal_prob_regr_train.prob_regression_train,
                               [x.shape, w.shape, y.shape], [dtype, dtype, dtype],
                               kernel_name=kernel_name,
-                              op_attrs=[], attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=[], attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     output = utils.mod_launch(mod, [x, w, y, output], expect=expect)
 
     return (x, w, y), output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
diff --git a/tests/common/test_run/dropout_run.py b/tests/common/test_run/dropout_run.py
index b1b85fc..0741f60 100644
--- a/tests/common/test_run/dropout_run.py
+++ b/tests/common/test_run/dropout_run.py
@@ -83,7 +83,7 @@ def dropout_execute(shape_tensor, keep_prob, dtype, kernel_name, attrs=None):
         output = utils.mod_launch(mod, (input, mask, output), expect=expect)
 
         source_code = mod.imported_modules[0].get_source()
-        utils.create_cce(kernel_name, "./", source_code)
+        utils.create_code(kernel_name, "./", source_code)
         rtol, atol = get_rtol_atol("dropout", dtype)
 
         return (input, mask), output, expect, compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=True)
diff --git a/tests/common/test_run/kldiv_loss_grad_run.py b/tests/common/test_run/kldiv_loss_grad_run.py
index bd4268e..deee6de 100644
--- a/tests/common/test_run/kldiv_loss_grad_run.py
+++ b/tests/common/test_run/kldiv_loss_grad_run.py
@@ -28,7 +28,7 @@ def kldiv_loss_grad_run(shape, dtype, kernel_name="kldiv_loss_grad", attrs=None)
         t = attrs.get("tuning", False)
         kernel_name = attrs.get("kernel_name", False)
         mod = utils.op_build_test(kldiv_loss_grad.kldiv_loss_grad, [shape, shape, shape], [dtype, dtype, dtype],
-                                  kernel_name=kernel_name, attrs=attrs, dump_cce=True, tuning=t)
+                                  kernel_name=kernel_name, attrs=attrs, dump_code=True, tuning=t)
         if t:
             cur_deriv, output, pre_deriv, prediction, target = gen_data(attrs, dtype, shape)
             return mod, cur_deriv, (pre_deriv, prediction, target, output)
@@ -36,7 +36,7 @@ def kldiv_loss_grad_run(shape, dtype, kernel_name="kldiv_loss_grad", attrs=None)
             return mod
     else:
         mod = utils.op_build_test(kldiv_loss_grad.kldiv_loss_grad, [shape, shape, shape], [dtype, dtype, dtype],
-                                  kernel_name=kernel_name, attrs=attrs, dump_cce=True)
+                                  kernel_name=kernel_name, attrs=attrs, dump_code=True)
         cur_deriv, output, pre_deriv, prediction, target = gen_data(attrs, dtype, shape)
         output = utils.mod_launch(mod, (pre_deriv, prediction, target, output), expect=cur_deriv)
         return (pre_deriv, prediction, target), output, cur_deriv, compare_tensor(output, cur_deriv,
                                                                                   rtol=0.005,
diff --git a/tests/common/test_run/l1_loss_grad_run.py b/tests/common/test_run/l1_loss_grad_run.py
index c1c4f4c..d1bcf62 100644
--- a/tests/common/test_run/l1_loss_grad_run.py
+++ b/tests/common/test_run/l1_loss_grad_run.py
@@ -28,7 +28,7 @@ def l1_loss_grad_run(shape, dtype, kernel_name="l1_loss_grad", attrs=None):
         t = attrs.get("tuning", False)
         kernel_name = attrs.get("kernel_name", False)
         mod = utils.op_build_test(l1_loss_grad.l1_loss_grad, [shape, shape, shape], [dtype, dtype, dtype],
-                                  kernel_name=kernel_name, attrs=attrs, dump_cce=True, tuning=t)
+                                  kernel_name=kernel_name, attrs=attrs, dump_code=True, tuning=t)
         if t:
             dloss, expect, output, prediction, target = gen_data(dtype, shape)
             return mod, expect, (dloss, prediction, target, output)
@@ -36,7 +36,7 @@ def l1_loss_grad_run(shape, dtype, kernel_name="l1_loss_grad", attrs=None):
             return mod
     else:
         mod = utils.op_build_test(l1_loss_grad.l1_loss_grad, [shape, shape, shape], [dtype, dtype, dtype],
-                                  kernel_name=kernel_name, attrs=attrs, dump_cce=True)
+                                  kernel_name=kernel_name, attrs=attrs, dump_code=True)
         dloss, expect, output, prediction, target = gen_data(dtype, shape)
         output = utils.mod_launch(mod, (dloss, prediction, target, output), expect=expect)
         return (dloss, prediction, target), output, expect, compare_tensor(output, expect, rtol=0.001, atol=0.001)
diff --git a/tests/common/test_run/matmul_run_mansch.py b/tests/common/test_run/matmul_run_mansch.py
index 3775eae..9e52d9e 100644
--- a/tests/common/test_run/matmul_run_mansch.py
+++ b/tests/common/test_run/matmul_run_mansch.py
@@ -45,7 +45,7 @@ def matmul_run_mansch(MatrixShape, l1_tiling, l0_tiling, kernel_name, attrs=None
     # launch the kernel
     mod = matmul_mansch.gemm_dsl(MatrixShape, l1_tiling, l0_tiling, kernel_name)
     source_code = mod.imported_modules[0].get_source()
-    utils.create_cce(kernel_name, ".", source_code)
+    utils.create_code(kernel_name, ".", source_code)
     res = utils.mod_launch(mod, [A, B, out_data])
 
     # transform numpy data to compute benchMark
diff --git a/tests/common/test_run/maxpool_ad_run.py b/tests/common/test_run/maxpool_ad_run.py
index 1490783..805f5d9 100644
--- a/tests/common/test_run/maxpool_ad_run.py
+++ b/tests/common/test_run/maxpool_ad_run.py
@@ -44,14 +44,14 @@ def maxpool_ad_run(shape, kernel, stride, pad, dtype, optimized, polyhedral=Fals
         else:
             mod = utils.op_build_test(maxpool_ad_no_custom_diff_poly_all_max, [head.shape, shape], [dtype, dtype],
                                       kernel_name="maxpool_ad_no_custom_diff_poly_all_max",
-                                      op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_cce=True, polyhedral=polyhedral)
+                                      op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_code=True, polyhedral=polyhedral)
             output = utils.mod_launch(mod, [head, input, output], expect=expect)
     else:
         if optimized:
             if first_max:
                 mod = utils.op_build_test(maxpool_ad, [head.shape, shape, forward.shape, mask.shape],
                                           [dtype, dtype, dtype, dtype], kernel_name="maxpool_ad_first_max",
-                                          op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_cce=True, polyhedral=polyhedral)
+                                          op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_code=True, polyhedral=polyhedral)
                 output = utils.mod_launch(mod, [head, input, forward, mask, output], expect=expect)
             else:
                 mod = maxpool_ad_manual_schedule_all_max(shape, kernel, stride, pad, dtype, attrs=attrs, polyhedral=polyhedral)
@@ -62,7 +62,7 @@ def maxpool_ad_run(shape, kernel, stride, pad, dtype, optimized, polyhedral=Fals
             else:
                 mod = utils.op_build_test(maxpool_ad_no_custom_diff_manual_schedule_all_max, [head.shape, shape],
                                           [dtype, dtype], kernel_name="maxpool_ad_no_custom_diff_manual_schedule_all_max",
-                                          op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_cce=True, polyhedral=polyhedral)
+                                          op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_code=True, polyhedral=polyhedral)
                 output = utils.mod_launch(mod, [head, input, output], expect=expect)
 
     if 'tuning' in attrs.keys():
diff --git a/tests/common/test_run/maxpool_grad_run.py b/tests/common/test_run/maxpool_grad_run.py
index 5d02eb8..2a5c95f 100644
--- a/tests/common/test_run/maxpool_grad_run.py
+++ b/tests/common/test_run/maxpool_grad_run.py
@@ -100,7 +100,7 @@ def maxpool_grad_run(shape, kernel, stride, pad, dtype, attrs):
         mod = utils.op_build_test(maxpool_grad.maxpool_grad, [shape, y_shape, y_shape],
                                   [dtype, dtype, dtype], op_attrs=[kernel, stride, pad],
-                                  kernel_name=kernel_name, attrs=attrs, dump_cce=True, tuning=t)
+                                  kernel_name=kernel_name, attrs=attrs, dump_code=True, tuning=t)
         if t:
             dy, expect, output, x, y = \
                 gen_data(dtype, kernel, pad, shape, stride, y_shape)
@@ -111,7 +111,7 @@ def maxpool_grad_run(shape, kernel, stride, pad, dtype, attrs):
         mod = utils.op_build_test(maxpool_grad.maxpool_grad, [shape, y_shape, y_shape],
                                   [dtype, dtype, dtype], op_attrs=[kernel, stride, pad],
-                                  kernel_name='maxpool_grad', attrs=attrs, dump_cce=True)
+                                  kernel_name='maxpool_grad', attrs=attrs, dump_code=True)
         dy, expect, output, x, y = \
             gen_data(dtype, kernel, pad, shape, stride, y_shape)
         output = utils.mod_launch(mod, (x, y, dy, output), expect=expect)
diff --git a/tests/common/test_run/maxpool_grad_with_argmax_run.py b/tests/common/test_run/maxpool_grad_with_argmax_run.py
index 93f884e..b456bd7 100644
--- a/tests/common/test_run/maxpool_grad_with_argmax_run.py
+++ b/tests/common/test_run/maxpool_grad_with_argmax_run.py
@@ -31,7 +31,7 @@ def maxpool_grad_with_argmax_run(shape, kernel, stride, pad, dtype, polyhedral=F
         mod = utils.op_build_test(maxpool_grad_with_argmax, [head.shape, mask.shape], [dtype, dtype],
                                   kernel_name="maxpool_grad_with_argmax", op_attrs=[shape, kernel, stride, pad], attrs=attrs,
-                                  log_cce=False, dump_cce=True, polyhedral=polyhedral)
+                                  log_cce=False, dump_code=True, polyhedral=polyhedral)
         if t:
             return mod, expect, (head, mask, output)
         else:
@@ -43,7 +43,7 @@ def maxpool_grad_with_argmax_run(shape, kernel, stride, pad, dtype, polyhedral=F
         mod = utils.op_build_test(maxpool_grad_with_argmax, [head.shape, mask.shape], [dtype, dtype],
                                   kernel_name="maxpool_grad_with_argmax", op_attrs=[shape, kernel, stride, pad], attrs=attrs,
-                                  log_cce=False, dump_cce=True, polyhedral=polyhedral)
+                                  log_cce=False, dump_code=True, polyhedral=polyhedral)
         output = utils.mod_launch(mod, [head, mask, output], expect=expect)
 
         rtol, atol = get_rtol_atol("maxpool_grad_with_argmax", dtype)
diff --git a/tests/common/test_run/nms_run.py b/tests/common/test_run/nms_run.py
index d5e7460..e35a438 100644
--- a/tests/common/test_run/nms_run.py
+++ b/tests/common/test_run/nms_run.py
@@ -94,6 +94,6 @@ def nms_run(shape_tensor, thres, dtype, kernel_name, attrs):
         output = utils.mod_launch(mod, (anchor, output), expect=expect)
         output = np.frombuffer(output.tobytes(), np.uint16).reshape(out_shape)
         source_code = mod.imported_modules[0].get_source()
-        utils.create_cce(kernel_name, "./", source_code)
+        utils.create_code(kernel_name, "./", source_code)
         expect = np.frombuffer(expect.tobytes(), np.uint16).reshape(out_shape)
     return anchor, output, expect, np.all(output == expect)
diff --git a/tests/common/test_run/roipool_run.py b/tests/common/test_run/roipool_run.py
index cecebf4..954ade4 100644
--- a/tests/common/test_run/roipool_run.py
+++ b/tests/common/test_run/roipool_run.py
@@ -32,7 +32,7 @@ def roipool_run(shape, roibox, pooled_shape, dtype, attrs, cce_path="./"):
         expect = roipool_expect(input1, shape, roibox, pooled_shape)
 
         # source_code = mod.imported_modules[0].get_source()
-        # utils.create_cce(kernel_name, cce_path, source_code)
+        # utils.create_code(kernel_name, cce_path, source_code)
 
         output = np.full(output_shape, np.nan, dtype)
         output = utils.mod_launch(mod, (input1, output), expect=expect)
diff --git a/tests/common/test_run/smooth_l1_loss_grad_run.py b/tests/common/test_run/smooth_l1_loss_grad_run.py
index afa78b4..cdf29c2 100644
--- a/tests/common/test_run/smooth_l1_loss_grad_run.py
+++ b/tests/common/test_run/smooth_l1_loss_grad_run.py
@@ -37,7 +37,7 @@ def smooth_l1_loss_grad_run(shape, dtype, attrs=None, kernel_name="smooth_l1_los
         kernel_name = attrs.get("kernel_name", False)
         mod = utils.op_build_test(smooth_l1_loss_grad.smooth_l1_loss_grad, [sample_shape, shape, shape, sample_shape],
                                   [dtype, dtype, dtype, anchor_samples_dtype], op_attrs=[sigma, anchor_sample_correct],
-                                  attrs=attrs, kernel_name=kernel_name, dump_cce=True, tuning=t)
+                                  attrs=attrs, kernel_name=kernel_name, dump_code=True, tuning=t)
         if t:
             anchor_samples, dloss, expect, output, prediction, prediction_, target, target_ = gen_data(
                 anchor_sample_correct, anchor_samples_dtype, dtype, sample_shape, shape, sigma)
@@ -50,7 +50,7 @@ def smooth_l1_loss_grad_run(shape, dtype, attrs=None, kernel_name="smooth_l1_los
         mod = utils.op_build_test(smooth_l1_loss_grad.smooth_l1_loss_grad, [sample_shape, shape, shape, sample_shape],
                                   [dtype, dtype, dtype, anchor_samples_dtype], op_attrs=[sigma, anchor_sample_correct],
-                                  attrs=attrs, kernel_name=kernel_name, dump_cce=True)
+                                  attrs=attrs, kernel_name=kernel_name, dump_code=True)
         output = utils.mod_launch(mod, (dloss, prediction, target, anchor_samples, output), expect=expect)
 
         return (dloss, prediction, target, anchor_samples), output, expect, compare_tensor(output, expect, atol=5e-3, rtol=5e-3)
diff --git a/tests/common/test_run/square_difference_run.py b/tests/common/test_run/square_difference_run.py
index 4b5430f..88880a8 100644
--- a/tests/common/test_run/square_difference_run.py
+++ b/tests/common/test_run/square_difference_run.py
@@ -35,7 +35,7 @@ def square_difference_run(shape1, shape2, dtype, kernel_name, attrs, cce_path=".
                                   input_types=[dtype, dtype], kernel_name=kernel_name, attrs=attrs)
         expect, input1, input2, output = gen_data(dtype, shape1, shape2)
         source_code = mod.imported_modules[0].get_source()
-        utils.create_cce(kernel_name, cce_path, source_code)
+        utils.create_code(kernel_name, cce_path, source_code)
         output = utils.mod_launch(mod, (input1, input2, output), expect=expect)
 
         return (input1, input2), output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
diff --git a/tests/common/test_run/strided_slice_grad_run.py b/tests/common/test_run/strided_slice_grad_run.py
index 37b8564..f21a8ac 100644
--- a/tests/common/test_run/strided_slice_grad_run.py
+++ b/tests/common/test_run/strided_slice_grad_run.py
@@ -81,7 +81,7 @@ def gen_data(begin, begin_mask, dtype, ellipsis_mask, end, end_mask, grad_shape,
     # source_code = mod.imported_modules[0].get_source()
     # print(source_code)
     # kernel_name = "cce_strided_slice_grad_fp16"
-    # utils.create_cce(kernel_name, './', source_code)
+    # utils.create_code(kernel_name, './', source_code)
     out_shape = input_shape
     output = np.full(out_shape, 0, dtype)
     return expect, grad, output
diff --git a/tests/common/test_run/truncatemod_run.py b/tests/common/test_run/truncatemod_run.py
index b3ce16a..1c2d2d3 100644
--- a/tests/common/test_run/truncatemod_run.py
+++ b/tests/common/test_run/truncatemod_run.py
@@ -26,7 +26,7 @@ def truncatemod_run(shape1, shape2, dtype, attrs):
         t = attrs.get("tuning", False)
         kernel_name = attrs.get("kernel_name", False)
         mod = utils.op_build_test(truncatemod.truncatemod, [shape1, shape2], [dtype, dtype], kernel_name=kernel_name,
-                                  attrs=attrs, dump_cce=True, tuning=t)
+                                  attrs=attrs, dump_code=True, tuning=t)
         if t:
             expect, input1, input2, output = gen_data(dtype, shape1, shape2)
             return mod, expect, (input1, input2, output)
@@ -35,7 +35,7 @@ def truncatemod_run(shape1, shape2, dtype, attrs):
     else:
         expect, input1, input2, output = gen_data(dtype, shape1, shape2)
         mod = utils.op_build_test(truncatemod.truncatemod, [shape1, shape2], [dtype, dtype], kernel_name="truncatemod",
-                                  attrs=attrs, dump_cce=True)
+                                  attrs=attrs, dump_code=True)
         output = utils.mod_launch(mod, (input1, input2, output), expect=expect)
         rtol, atol = get_rtol_atol("truncatemod", dtype)
         res = compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=True)
diff --git a/tests/common/test_run/vector_matmul_run.py b/tests/common/test_run/vector_matmul_run.py
index cb95dd3..8a504a8 100644
--- a/tests/common/test_run/vector_matmul_run.py
+++ b/tests/common/test_run/vector_matmul_run.py
@@ -145,7 +145,7 @@ def vector_matmul_run(case_index, m, n, k, trans_a, trans_b, read_data, dump_dat
     # k = (k+15)//16*16
 
     mod, out_shape = vector_matmul.vector_matmul(m, n, k, trans_a, trans_b, dtype, kernel_name, attrs)
-    utils.create_cce(kernel_name, "./", mod.imported_modules[0].get_source())
+    utils.create_code(kernel_name, "./", mod.imported_modules[0].get_source())
 
     # Generate data
     m_a, m_b, bench_mark = vector_matmul_data(case_index, m, n, k, trans_a, trans_b, read_data, dump_data, dtype)
diff --git a/tests/common/test_run/winograd_ad_run.py b/tests/common/test_run/winograd_ad_run.py
index a32670f..2621c25 100644
--- a/tests/common/test_run/winograd_ad_run.py
+++ b/tests/common/test_run/winograd_ad_run.py
@@ -35,7 +35,7 @@ def winograd_ad_run(filter_shape, tile, dtype, attrs):
         t = attrs.get("tuning", False)
         kernel_name = attrs.get("kernel_name", False)
         mod = utils.op_build_test(winograd_ad, [head_np.shape, filter_shape], [dtype, dtype], kernel_name=kernel_name,
-                                  attrs=attrs, log_cce=True, dump_cce=True, tuning=t)
+                                  attrs=attrs, log_cce=True, dump_code=True, tuning=t)
         if t:
             expect, input_np, output = gen_data(filter_shape, RANGEFILL, dtype)
             return mod, expect, (head_np, input_np, output)
@@ -45,7 +45,7 @@ def winograd_ad_run(filter_shape, tile, dtype, attrs):
     # scenario 1:
     expect, input_np, output = gen_data(filter_shape, RANGEFILL, dtype)
     mod = utils.op_build_test(winograd_ad, [head_np.shape, filter_shape], [dtype, dtype], kernel_name="winograd_ad",
-                              attrs=attrs, log_cce=True, dump_cce=True)
+                              attrs=attrs, log_cce=True, dump_code=True)
    output = utils.mod_launch(mod, [head_np, input_np, output], expect=expect)
     if not compare_tensor(output, expect, atol=0.1):
         return [head_np, input_np], output, expect, compare_tensor(output, expect, rtol=5e-03, atol=5e-03,
diff --git a/tests/operators/cube/quant_conv.py b/tests/operators/cube/quant_conv.py
index eafe126..9cd4b60 100644
--- a/tests/operators/cube/quant_conv.py
+++ b/tests/operators/cube/quant_conv.py
@@ -403,7 +403,7 @@ def test_CCE_Conv(fmap_shape, filter_shape, pad_, stride_,
     mod = akg.build(s, [A, B, ScaleQ, OffsetQ, out], "cce", name=kernel_name, attrs={"dim": info}, polyhedral=True)
     source_code = mod.imported_modules[0].get_source()
     # print(source_code)
-    # utils.create_cce(kernel_name, cce_path, source_code)
+    # utils.create_code(kernel_name, cce_path, source_code)
 
     if run_cce:
         run_conv(mod, fmap_shape, filter_shape, pad_[0], stride_[0], use_bias)
--
GitLab