diff --git a/python/akg/composite/build_module.py b/python/akg/composite/build_module.py
index a54d99a71182d587b0ae64179c77a40c0e3831e4..dfa9428cbb8cf454c0c3ad1fddee4da20181c349 100644
--- a/python/akg/composite/build_module.py
+++ b/python/akg/composite/build_module.py
@@ -176,7 +176,8 @@ def build_cuda(outputs, args, sch_name, kernel_name):
     }
     with tvm.target.cuda() as cuda:
         s = scheduler[sch_name](outputs)
-        with tvm.build_config(dump_pass_ir = True):
+        dump_ir = os.getenv('MS_AKG_DUMP_IR') == "on"
+        with tvm.build_config(dump_pass_ir = dump_ir):
             mod = tvm.build(s, args, cuda, name = kernel_name)
             dump_cuda_meta.dump(mod, kernel_name, s, list(args))
     return mod
diff --git a/python/akg/ms/message.py b/python/akg/ms/message.py
index 8ed443fc4d0dec2e2ba845dbb75ca26bba454882..94e533a0d3a4fd30649aa73ec3f053656615a527 100644
--- a/python/akg/ms/message.py
+++ b/python/akg/ms/message.py
@@ -82,7 +82,10 @@ def compilewithjson_to_func(json_str):
         if kernel_info['attr']:
             for ext_arg in kernel_info['attr']:
                 op_attrs.append(ext_arg['value'])
-        mod = utils.op_build(op_func, input_shapes, input_types, op_attrs, kernel_info['op'])
+        dump_ir = os.getenv('MS_AKG_DUMP_IR') == "on"
+        dump_code = os.getenv('MS_AKG_DUMP_CODE') == "on"
+        mod = utils.op_build(op_func, input_shapes, input_types, op_attrs, kernel_info['op'], dump_ir=dump_ir,
+                             dump_code=dump_code)
         return True
     else:
         op_func = getattr(cce, op_name, None)
diff --git a/python/akg/ms/op_build.py b/python/akg/ms/op_build.py
index 0e1628960c810ed1c210ed864c56f3f58a2372d2..345f3cd89a38f1c4ee887b9c677cc0182b6f4026 100644
--- a/python/akg/ms/op_build.py
+++ b/python/akg/ms/op_build.py
@@ -31,7 +31,7 @@ from akg.utils import validation_check as vc_util

 BINDS = "binds"
 MS_AKG_DUMP_IR = "MS_AKG_DUMP_IR"
-MS_AKG_DUMP_CCE = "MS_AKG_DUMP_CCE"
+MS_AKG_DUMP_CODE = "MS_AKG_DUMP_CODE"
 MS_DAVINCI_KERNEL_PATH = "./kernel_meta/"
diff --git a/python/akg/ops/nn/maxpool.py b/python/akg/ops/nn/maxpool.py
index 00f4aad7ba4ce0788c5b82818719b13902d88444..b91456fc8a10dbb7f6f688d3a41220e2c535bade 100644
--- a/python/akg/ops/nn/maxpool.py
+++ b/python/akg/ops/nn/maxpool.py
@@ -294,7 +294,7 @@ def maxpool_manual_schedule(shape, kernel, stride, padding, dtype, attrs=None, p
     mod = akg.build(s, [data, res], "cce", name="maxpool_manual_schedule", attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "maxpool_ad_manual_schedule"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod

 def pad_strategy_check(strategy):
diff --git a/python/akg/ops/nn/maxpool_ad.py b/python/akg/ops/nn/maxpool_ad.py
index c5a102d73e873c78dd00398ccc18d5730cde5091..0b1f66a652c1d5afb32de6bbaa8c301c9449f754 100644
--- a/python/akg/ops/nn/maxpool_ad.py
+++ b/python/akg/ops/nn/maxpool_ad.py
@@ -387,7 +387,7 @@ def maxpool_ad_manual_schedule_all_max(shape, kernel, stride, pad, dtype, polyhe
                     attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "maxpool_ad_manual_schedule_all_max"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod


@@ -489,5 +489,5 @@ def maxpool_ad_manual_schedule_no_overlap_all_max(shape, kernel, stride, pad, dt
                     name="maxpool_ad_manual_schedule_no_overlap_all_max", attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "maxpool_ad_manual_schedule_no_overlap_all_max"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod
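Both dump switches introduced in this change follow the same convention: a feature is enabled only when the environment variable is set to the exact string "on". A minimal sketch of driving them from a caller (the variable names come from this diff; the surrounding script is illustrative, not part of the patch):

```python
import os

# Hypothetical driver: enable IR and code dumping for one compilation run.
os.environ["MS_AKG_DUMP_IR"] = "on"     # any other value (or unset) disables it
os.environ["MS_AKG_DUMP_CODE"] = "on"

dump_ir = os.getenv("MS_AKG_DUMP_IR") == "on"      # -> True
dump_code = os.getenv("MS_AKG_DUMP_CODE") == "on"  # -> True
```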
diff --git a/python/akg/utils/dump_cuda_meta.py b/python/akg/utils/dump_cuda_meta.py
index 358e12b91a262acdf3931d00d474e6ab5783ad52..e518b89bb985e6fc71cea1a8d37feefdcc6ace0e 100644
--- a/python/akg/utils/dump_cuda_meta.py
+++ b/python/akg/utils/dump_cuda_meta.py
@@ -64,7 +64,19 @@ def save_gpu_params(s, args, kernel_info):
     ptx_code = kernel_info[0]
     file_name = kernel_info[1]
     kernel_name = kernel_info[2]
-    ir = str(akg.tvm.lower(s, args, simple_mode=True))
+
+
+    dump_ir = os.getenv('MS_AKG_DUMP_IR') == "on"
+    if dump_ir:
+        schedule_path = os.path.realpath(kernel_name)
+        all_passes = os.listdir(schedule_path)
+        for cur_pass in all_passes:
+            if cur_pass.startswith("00_"):
+                with open(schedule_path + '/' + cur_pass, "r") as file:
+                    ir = file.read()
+                break
+    else:
+        ir = str(akg.tvm.lower(s, args, simple_mode=True))
     file_path = os.path.realpath(file_name)
     if os.path.exists(file_path):
         os.remove(file_path)
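This hunk leans on TVM's `dump_pass_ir` behavior: when enabled, each lowering pass writes its IR into numbered files (`00_*`, `01_*`, ...) under a per-kernel directory, so the file whose name starts with "00_" holds the IR before any pass has run. A hedged sketch of that lookup factored into a standalone helper (the directory layout is assumed from the code above, not independently verified):

```python
import os

def read_initial_ir(kernel_name):
    """Return the pre-pass IR dumped under <kernel_name>/00_*, or None if absent."""
    schedule_path = os.path.realpath(kernel_name)
    # Pass dumps are numbered, so the lexicographically first "00_" file is the input IR.
    for cur_pass in sorted(os.listdir(schedule_path)):
        if cur_pass.startswith("00_"):
            with open(os.path.join(schedule_path, cur_pass), "r") as f:
                return f.read()
    return None
```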
diff --git a/python/akg/utils/kernel_exec.py b/python/akg/utils/kernel_exec.py
index 97ebf1616e34b427b4387141909bfb69a6d5f1d3..9167cff1bba45b82efd2d0f954973f37f6e35f5d 100644
--- a/python/akg/utils/kernel_exec.py
+++ b/python/akg/utils/kernel_exec.py
@@ -67,29 +67,42 @@ def func_time_required(func_name):
     return wrapper


-def create_cce(kernel_name, cce_path=None, code=None):
+def create_code(kernel_name, code_path=None, code=None, code_type="CCE"):
     """
-    Create cce file.
+    Create cce or cuda file.

     Args:
-        kernel_name: cce file name.
-        cce_path: cce file path.
-        code: cce code.
+        kernel_name: file name.
+        code_path: file path.
+        code: code.
+        code_type: code type.
     """
-    if cce_path:
-        if len(cce_path) > 4 and cce_path[-4:].lower() == ".cce":
-            real_path = cce_path
+    if code_type == "CCE":
+        postfix = ".cce"
+    elif code_type == "CUDA":
+        postfix = ".cu"
+    else:
+        logging.info("the target code type %s is not supported.", code_type)
+
+    if not code_path:
+        code_path = "./"
+
+    if code_type == "CCE" and len(code_path) > 4 and code_path[-4:].lower() == postfix:
+        real_path = code_path
+    elif code_type == "CUDA" and len(code_path) > 3 and code_path[-3:].lower() == postfix:
+        real_path = code_path
+    else:
+        if code_path[-1] == r"/":
+            real_path = code_path + kernel_name + postfix
         else:
-            if cce_path[-1] == r"/":
-                real_path = cce_path + kernel_name + ".cce"
-            else:
-                real_path = cce_path + r"/" + kernel_name + ".cce"
-        dir_path = r"/".join(real_path.split(r"/")[:-1])
-        if not os.path.isdir(dir_path):
-            os.makedirs(dir_path)
+            real_path = code_path + r"/" + kernel_name + postfix
+    dir_path = r"/".join(real_path.split(r"/")[:-1])
+    if not os.path.isdir(dir_path):
+        os.makedirs(dir_path)
+
+    with open(real_path, 'wt') as ss:
+        ss.write(code)

-    with open(real_path, 'wt') as ss:
-        ss.write(code)


 def gen_name_kernel(kernel, dtype, shapes):
@@ -538,7 +551,7 @@ def gen_kernel_name(input_shapes, input_types, op_attrs=None, kernel_name=""):

 @func_time_required
 def op_build_test(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
-                  attrs=None, log_cce=False, dump_ir=True, dump_cce=True,
+                  attrs=None, log_cce=False, dump_ir=True, dump_code=True,
                   polyhedral=True, tuning=False):
     """
     Return module from op_build with given inputs, distinguish tuning mode.
@@ -552,7 +565,7 @@ def op_build_test(op_func, input_shapes, input_types, op_attrs=None, kernel_name
         attrs (dict): tiling parameter.
         log_cce (bool): False by default.
         dump_ir (bool): True by default.
-        dump_cce (bool): False by default.
+        dump_code (bool): True by default.
         polyhedral (bool): True by default.
         tuning (bool): False by default.
@@ -565,7 +578,7 @@ def op_build_test(op_func, input_shapes, input_types, op_attrs=None, kernel_name
         kernel_name = gen_kernel_name(input_shapes, input_types, op_attrs, kernel_name)
     logging.debug('kernel_name---------- %s', str(kernel_name))
     mod = op_build(op_func, input_shapes, input_types, op_attrs, kernel_name,
-                   attrs, log_cce, dump_ir, dump_cce,
+                   attrs, log_cce, dump_ir, dump_code,
                    polyhedral, tuning)
     return mod

@@ -593,7 +606,7 @@ def recursive_copy(obj):


 def op_build(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
-             attrs=None, log_cce=False, dump_ir=True, dump_cce=True,
+             attrs=None, log_cce=False, dump_ir=True, dump_code=True,
              polyhedral=True, tuning=False):
     """
     Return module built from op_func with given inputs.
@@ -607,7 +620,7 @@ def op_build(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
         attrs (dict): tiling parameter.
         log_cce (bool): False by default.
         dump_ir (bool): True by default.
-        dump_cce (bool): False by default.
+        dump_code (bool): True by default.
         polyhedral (bool): True by default.
         tuning (bool): False by default.
@@ -730,9 +743,13 @@ def op_build(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
             kernel_name = kernel_name if kernel_name != "" else sch_tmpl['op_name']
             with akg.tvm.target.cuda() as target:
                 s = sch_tmpl['schedule'](sch_tmpl['output'])
-                with akg.build_config(dump_pass_ir=True):
-                    mod = akg.build(s, op_var, "cuda", shape_var, name=kernel_name, attrs=attrs, polyhedral=polyhedral, binds=binds)
+                with akg.tvm.build_config(dump_pass_ir=dump_ir):
+                    mod = akg.build(s, op_var, "cuda", shape_var, name=kernel_name, attrs=attrs,
+                                    polyhedral=polyhedral, binds=binds)
                 dump_cuda_meta.dump(mod, kernel_name, s, op_var)
+                if dump_code:
+                    source_code = mod.imported_modules[0].get_source()
+                    create_code(kernel_name, "./", source_code, "CUDA")
             return mod

     if isinstance(output, (list, tuple)):
@@ -781,9 +798,9 @@ def op_build(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
         if log_cce:
             logging.debug("#################cce code####################")
             logging.debug(source_code)
-        if dump_cce:
-            cce_path = "./"
-            create_cce(kernel_name, cce_path, source_code)
+        if dump_code:
+            code_path = "./"
+            create_code(kernel_name, code_path, source_code)

     return mod
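The generalized `create_code` keeps the old CCE behavior as its default, so existing call sites survive the rename unchanged; only CUDA dumps need the extra argument. A usage sketch (the kernel name, path, and source string are placeholders):

```python
from akg.utils import kernel_exec as utils

source_code = "// kernel source, normally mod.imported_modules[0].get_source()"

# CCE is the default code_type, so old create_cce call sites map one-for-one:
utils.create_code("my_kernel", "./", source_code)           # writes ./my_kernel.cce

# CUDA dumps pass the new argument and get a .cu postfix instead:
utils.create_code("my_kernel", "./", source_code, "CUDA")   # writes ./my_kernel.cu
```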
diff --git a/src/codegen/build_module.cc b/src/codegen/build_module.cc
index 1f8bfd2c356d1e506fd615996d9fac4f9329550c..e1c258f046c2fd859510cc690ef275e2ef0d8b1d 100644
--- a/src/codegen/build_module.cc
+++ b/src/codegen/build_module.cc
@@ -1110,8 +1110,8 @@ air::runtime::Module BuildToModule(const NodeRef &ref, const std::string &target
     mhost.Import(mdev);
   }

-  const char *akg_dump_cce = getenv("MS_AKG_DUMP_CCE");
-  if (akg_dump_cce != nullptr) {
+  const char *akg_dump_code = getenv("MS_AKG_DUMP_CODE");
+  if (akg_dump_code != nullptr) {
     auto mod0 = mhost->imports()[0];
     CHECK(mod0.defined());
diff --git a/tests/common/test_op/add_a_conv.py b/tests/common/test_op/add_a_conv.py
index 85a00671bc103bd3cd9e1e84e91bc1ac58117318..aec9a68a7ec37ef31c9cff62bed5c927ed4ac4fc 100644
--- a/tests/common/test_op/add_a_conv.py
+++ b/tests/common/test_op/add_a_conv.py
@@ -207,7 +207,7 @@ def add_a_conv(fmap_shape, filter_shape, pad_, stride_, dilation_,
     mod = akg.build(s, [a_value, b_value, conv], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
     source_code = mod.imported_modules[0].get_source()
     cce_path = '.'
-    utils.create_cce(kernel_name, cce_path, source_code)
+    utils.create_code(kernel_name, cce_path, source_code)

     return mod
diff --git a/tests/common/test_op/add_b_conv.py b/tests/common/test_op/add_b_conv.py
index de564310b854f8e4654548ac94efc21e57213e94..fbdd51e8c882a701d68f49c18af57259355ddbf0 100644
--- a/tests/common/test_op/add_b_conv.py
+++ b/tests/common/test_op/add_b_conv.py
@@ -201,7 +201,7 @@ def add_b_conv(fmap_shape, filter_shape, pad_, stride_, dilation_,
     mod = akg.build(s, [a_value, b_value, conv], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
     source_code = mod.imported_modules[0].get_source()
     cce_path = '.'
-    utils.create_cce(kernel_name, cce_path, source_code)
+    utils.create_code(kernel_name, cce_path, source_code)

     return mod
diff --git a/tests/common/test_op/col2im_compute.py b/tests/common/test_op/col2im_compute.py
index 184275ff593baa170d675d9d23236669f94b3600..63a54512451470571cffae7576c04781148e6d96 100644
--- a/tests/common/test_op/col2im_compute.py
+++ b/tests/common/test_op/col2im_compute.py
@@ -64,5 +64,5 @@ def col2im_manual_schedule(shape, kernel, stride, pad, dtype, output_H_W, polyhe
     mod = akg.build(s, [data, res], "cce", name="col2im_manual_schedule", attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "col2im_manual_schedule"
-    utils.create_cce(kernel_name, "./", source_code)
+    utils.create_code(kernel_name, "./", source_code)
     return mod
diff --git a/tests/common/test_op/gather.py b/tests/common/test_op/gather.py
index 0c4f315af63081846fc0e45d7939e3e2a1dfbad4..adbb88a7bb3cd38300a349946d9ae29caae03362 100644
--- a/tests/common/test_op/gather.py
+++ b/tests/common/test_op/gather.py
@@ -115,6 +115,6 @@ def gather(params_shape, indices_shape, params_dtype, indices_dtype, axis, kerne
         mod = akg.build(s, [xx, yy, res], "cce", name=kernel_name, attrs=attrs)
         source_code = mod.imported_modules[0].get_source()
-        utils.create_cce(kernel_name, cce_path, source_code)
+        utils.create_code(kernel_name, cce_path, source_code)

         return mod
diff --git a/tests/common/test_op/im2col_compute.py b/tests/common/test_op/im2col_compute.py
index b17a0b7b341dce4b331a89f586bedab0b5360b8f..569ada841255aae50a99d802625ea29f268a7258 100644
--- a/tests/common/test_op/im2col_compute.py
+++ b/tests/common/test_op/im2col_compute.py
@@ -109,5 +109,5 @@ def im2col_manual_schedule(shape, kernel, stride, pad, dtype, polyhedral=True, a
                     attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "im2col_manual_schedule"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod
diff --git a/tests/common/test_op/reduce_max_ad.py b/tests/common/test_op/reduce_max_ad.py
index ab6ad0e4a1809a317503d609359c5fe03d3ebb96..78a86fcaa013fc4e789044eb9290089078e0fef7 100644
--- a/tests/common/test_op/reduce_max_ad.py
+++ b/tests/common/test_op/reduce_max_ad.py
@@ -200,5 +200,5 @@ def reduce_max_ad_optimized_manual_schedule(input_shape, dtype, axis, keepdims,
                     name="reduce_max_ad_manual_schedule", attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "reduce_max_ad_manual_schedule"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod
diff --git a/tests/common/test_op/reduce_min_ad.py b/tests/common/test_op/reduce_min_ad.py
index c2f762804d21a45cb788c967107d670fee6e6120..9d4ae087819c5e7ded56fc12536c42926edb3e4c 100644
--- a/tests/common/test_op/reduce_min_ad.py
+++ b/tests/common/test_op/reduce_min_ad.py
@@ -159,5 +159,5 @@ def reduce_min_ad_optimized_manual_schedule(input_shape, dtype, axis, keepdims,
                     attrs=attrs, polyhedral=polyhedral)
     source_code = mod.imported_modules[0].get_source()
     kernel_name = "reduce_min_ad_manual_schedule"
-    utils.create_cce(kernel_name, './', source_code)
+    utils.create_code(kernel_name, './', source_code)
     return mod
diff --git a/tests/common/test_op/vector_matmul.py b/tests/common/test_op/vector_matmul.py
index c9cbaaae1b302f48a9abdd60bb337d51f43dabc9..397a73ff7b76f43781ada53537e4b0cbc25b5128 100644
--- a/tests/common/test_op/vector_matmul.py
+++ b/tests/common/test_op/vector_matmul.py
@@ -105,5 +105,5 @@ def vector_matmul(data_m, data_n, data_k, trans_a, trans_b, dtype, kernel_name,
     with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
         mod = akg.build(forward_s, op_vars, "cce", name=kernel_name, attrs=attrs, polyhedral=True)
     source_code = mod.imported_modules[0].get_source()
-    utils.create_cce(kernel_name, "./", source_code)
+    utils.create_code(kernel_name, "./", source_code)
     return mod, output_shape
diff --git a/tests/common/test_run/IOU_for_train_run.py b/tests/common/test_run/IOU_for_train_run.py
index a9d5325e7f1aa7723d22389a1fe31475ac38e088..727dce53b68fdfe3de8d35a09c4aa86beeb7f342 100644
--- a/tests/common/test_run/IOU_for_train_run.py
+++ b/tests/common/test_run/IOU_for_train_run.py
@@ -81,7 +81,7 @@ def iou_for_train_run(shape_tensor,
     output = utils.mod_launch(mod, (anchor, ground_truth, output), expect=expect)

     source_code = mod.imported_modules[0].get_source()
-    utils.create_cce(kernel_name, "./", source_code)
+    utils.create_code(kernel_name, "./", source_code)

     return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
diff --git a/tests/common/test_run/avgpool_ad_run.py b/tests/common/test_run/avgpool_ad_run.py
index 3b36fb373caf7aee34a14fd9dbd7a6d12d488972..7c7c916e7717ff3e11ce9ab33f843e2e5ad25cdf 100644
--- a/tests/common/test_run/avgpool_ad_run.py
+++ b/tests/common/test_run/avgpool_ad_run.py
@@ -41,7 +41,7 @@ def avgpool_ad_run(shape, kernel, stride, pad, dtype, polyhedral=False, attrs=No
         input = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
         y = avgpool_run.benchmark(input, kernel, stride, pad)
         mod = utils.op_build_test(avgpool, [y.shape, shape], [dtype, dtype], op_attrs=[kernel, stride, pad],
-                                  kernel_name=kernel_name, attrs=attrs, log_cce=True, dump_cce=True, tuning=t)
+                                  kernel_name=kernel_name, attrs=attrs, log_cce=True, dump_code=True, tuning=t)
         if t:
             expect, head, output = gen_data(dtype, input, kernel, pad, stride, support_list, y)
             return mod, expect, (head, input, output)
@@ -51,7 +51,7 @@ def avgpool_ad_run(shape, kernel, stride, pad, dtype, polyhedral=False, attrs=No
         input = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
         y = avgpool_run.benchmark(input, kernel, stride, pad)
         mod = utils.op_build_test(avgpool, [y.shape, shape], [dtype, dtype], op_attrs=[kernel, stride, pad],
-                                  kernel_name=kernel_name, attrs=attrs, log_cce=True, dump_cce=True)
+                                  kernel_name=kernel_name, attrs=attrs, log_cce=True, dump_code=True)
         expect, head, output = gen_data(dtype, input, kernel, pad, stride, support_list, y)
         output = utils.mod_launch(mod, [head, input, output], expect=expect)
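The test updates from here on are the same mechanical rename at call sites: `dump_cce=True` becomes `dump_code=True` and `utils.create_cce(...)` becomes `utils.create_code(...)`. For downstream callers the migration is one-for-one; a hedged before/after sketch (the op import, kernel name, and shapes are placeholders, not taken from this diff):

```python
from akg.utils import kernel_exec as utils
from akg.ops.math.add import add   # placeholder op, for illustration only

shape, dtype = (16, 16), "float16"

# before: mod = utils.op_build_test(add, [shape, shape], [dtype, dtype],
#                                   kernel_name="add_demo", dump_cce=True)
mod = utils.op_build_test(add, [shape, shape], [dtype, dtype],
                          kernel_name="add_demo", dump_code=True)

# before: utils.create_cce("add_demo", "./", mod.imported_modules[0].get_source())
utils.create_code("add_demo", "./", mod.imported_modules[0].get_source())
```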
diff --git a/tests/common/test_run/bounding_box_encode_run.py b/tests/common/test_run/bounding_box_encode_run.py
index 17720ee5910420d2013fce09a9307c10e6c41aa1..d076aca1b0952b4db76568f741c6db0034d81da5 100644
--- a/tests/common/test_run/bounding_box_encode_run.py
+++ b/tests/common/test_run/bounding_box_encode_run.py
@@ -197,7 +197,7 @@ def bounding_box_encode_run(anchor_box_shape, groundtruth_box_shape, anchor_samp
         mod = utils.op_build_test(bounding_box_encode.bouding_box_encode,
                                   [anchor_box_shape, groundtruth_box_shape, anchor_samples_shape],
                                   [dtype, dtype, "int32"],
-                                  op_attrs, kernel_name=kernel_name, attrs=attrs, dump_cce=True, tuning=t)
+                                  op_attrs, kernel_name=kernel_name, attrs=attrs, dump_code=True, tuning=t)
         if t:
             anchor_box_data, anchor_samples_data, expect, groundtruth_box_data, output_data = gen_data(anchor_box_shape,
                                                                                                        anchor_samples_shape,
@@ -211,7 +211,7 @@ def bounding_box_encode_run(anchor_box_shape, groundtruth_box_shape, anchor_samp
         mod = utils.op_build_test(bounding_box_encode.bouding_box_encode,
                                   [anchor_box_shape, groundtruth_box_shape, anchor_samples_shape],
                                   [dtype, dtype, "int32"],
-                                  op_attrs, kernel_name=kernel_name, attrs=attrs, dump_cce=True)
+                                  op_attrs, kernel_name=kernel_name, attrs=attrs, dump_code=True)
         anchor_box_data, anchor_samples_data, expect, groundtruth_box_data, output_data = gen_data(anchor_box_shape,
                                                                                                    anchor_samples_shape,
                                                                                                    dtype, epsilon,
diff --git a/tests/common/test_run/conv_filter_ad_run.py b/tests/common/test_run/conv_filter_ad_run.py
index 04f636ad661c1bf18f32c90168111c353b1bc675..3bd5e376ba7974c36163989f5808c7341ca834e8 100644
--- a/tests/common/test_run/conv_filter_ad_run.py
+++ b/tests/common/test_run/conv_filter_ad_run.py
@@ -192,7 +192,7 @@ def conv_filter_ad_run(fmap_shape, filter_shape, pad_, stride_, dilation_, attr
             return np_input, out_data, expect, True

     mod = utils.op_build_test(conv_filter_ad.conv_filter_ad, [dw_input_shapes], [conv_dtype],
-                              op_attrs=[fmap_shape, filter_shape, pad_, stride_, dilation_], kernel_name='conv_filter_ad', attrs=attrs, dump_cce = True)
+                              op_attrs=[fmap_shape, filter_shape, pad_, stride_, dilation_], kernel_name='conv_filter_ad', attrs=attrs, dump_code = True)
     args = (dy_data, dx_data, out_data)
     out_data = utils.mod_launch(mod, args, expect=expect)
     rtol, atol = get_rtol_atol("conv_filter_ad", conv_dtype)
diff --git a/tests/common/test_run/conv_run_mansch.py b/tests/common/test_run/conv_run_mansch.py
index d9d221d95e3ef5cb6c443e185042284f435d3671..6795678962304de837b247ba6872c9bc583d66fe 100644
--- a/tests/common/test_run/conv_run_mansch.py
+++ b/tests/common/test_run/conv_run_mansch.py
@@ -34,7 +34,7 @@ def conv_run_mansch(FMap_shape, Filter_shape, Pad, Stride, Dilation=None, use_bi
                                  use_bias=use_bias, fp32_mad=fp32_mad, kernel_name="conv_mansch")

     source_code = mod.imported_modules[0].get_source()
-    utils.create_cce("conv_mansch", ".", source_code)
+    utils.create_code("conv_mansch", ".", source_code)

     A, B, bias_data, expect = gen_data(FMap_shape, Filter_shape, Pad, Stride, Dilation, use_bias)
     expect = expect.reshape((expect.shape[0], expect.shape[1], expect.shape[2]*expect.shape[3],expect.shape[4]))  # output on conv2d is in 4d format
diff --git a/tests/common/test_run/distr_bernoulli_logprob_ad_run.py b/tests/common/test_run/distr_bernoulli_logprob_ad_run.py
index 7129e4b80997ad196b510ac2407848a822b3d26e..064d9a71f5836205d2f72472c8cb6d64da066ebb 100644
--- a/tests/common/test_run/distr_bernoulli_logprob_ad_run.py
+++ b/tests/common/test_run/distr_bernoulli_logprob_ad_run.py
@@ -26,7 +26,7 @@ def logprob_ad_run(shape, dtype, kernel_name="", attrs=None):

     mod = utils.op_build_test(distr_bernoulli_logprob_ad.bernoulli_logprob_ad, [head.shape, x.shape, probs.shape],
                               [dtype, dtype, dtype], kernel_name=kernel_name,
-                              op_attrs=None, attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=None, attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     outputs = utils.mod_launch(mod, [head, x, probs, *outputs], outputs=tuple(range(-len(outputs), 0)), expect=expects)
     outputs = list(outputs)
diff --git a/tests/common/test_run/distr_bernoulli_logprob_run.py b/tests/common/test_run/distr_bernoulli_logprob_run.py
index 72b25256b7578ee2e5b79c6511b624c6f154186f..1c0a73ac630c5b2ec3a22a10765696d59ce78458 100644
--- a/tests/common/test_run/distr_bernoulli_logprob_run.py
+++ b/tests/common/test_run/distr_bernoulli_logprob_run.py
@@ -29,7 +29,7 @@ def log_prob_run(shape, dtype, kernelname="", attrs = None):

     mod = utils.op_build_test(log_prob_op, [x.shape, probs.shape],
                               [dtype, dtype], kernel_name=kernelname,
-                              op_attrs=[], attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=[], attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     output = utils.mod_launch(mod, [x, probs, output], expect=expect)

     return (x, probs), output, expect, compare_tensor(output, expect, rtol=1e-03, atol=1e-03, equal_nan=True)
diff --git a/tests/common/test_run/distr_normal_diag_KLdiv_ad_run.py b/tests/common/test_run/distr_normal_diag_KLdiv_ad_run.py
index a20d40970caee42543e5791201fb1964857e625d..e1c7cc5c0b921472ce8966a931db9376a4e11fbf 100644
--- a/tests/common/test_run/distr_normal_diag_KLdiv_ad_run.py
+++ b/tests/common/test_run/distr_normal_diag_KLdiv_ad_run.py
@@ -25,7 +25,7 @@ def KLdiv_ad_run(shape, dtype, kernel_name="", attrs=None):

     mod = utils.op_build_test(distr_normal_diag_KLdiv_ad.normal_diag_KLdiv_ad, [head.shape, mean.shape, scale.shape],
                               [dtype, dtype, dtype], kernel_name=kernel_name,
-                              op_attrs=None, attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=None, attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     outputs = utils.mod_launch(mod, [head, mean, scale, *outputs], outputs=tuple(range(-len(outputs), 0)), expect=expects)
     outputs = list(outputs)
diff --git a/tests/common/test_run/distr_normal_diag_KLdiv_run.py b/tests/common/test_run/distr_normal_diag_KLdiv_run.py
index b668f4dcc34ef8b3253dd8a08f5ca7010a5acd57..b2143b1d6f5a6d89326f49c4f4feb9dd5fc0c56a 100644
--- a/tests/common/test_run/distr_normal_diag_KLdiv_run.py
+++ b/tests/common/test_run/distr_normal_diag_KLdiv_run.py
@@ -27,7 +27,7 @@ def KLdiv_run(shape, dtype, kernelname="", attrs = None):

     mod = utils.op_build_test(KLdiv_op, [mean.shape, scale.shape],
                               [dtype, dtype], kernel_name=kernelname,
-                              op_attrs=[], attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=[], attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     output = utils.mod_launch(mod, [mean, scale, output], expect = expect)

     return (mean, scale), output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
diff --git a/tests/common/test_run/distr_normal_diag_logprob_ad_run.py b/tests/common/test_run/distr_normal_diag_logprob_ad_run.py
index 90e2548b779091e1abac7ffdf26fd4f4214c43f1..386108fb95b3f3c68818254e66329cd668ed5471 100644
--- a/tests/common/test_run/distr_normal_diag_logprob_ad_run.py
+++ b/tests/common/test_run/distr_normal_diag_logprob_ad_run.py
@@ -30,7 +30,7 @@ def logprob_ad_run(shape, dtype, kernel_name="", attrs=None):
         op_attrs=None,
         attrs=None,
         log_cce=True,
-        dump_cce=True,
+        dump_code=True,
         polyhedral=True,
     )
     outputs = utils.mod_launch(
diff --git a/tests/common/test_run/distr_normal_diag_logprob_run.py b/tests/common/test_run/distr_normal_diag_logprob_run.py
index ea009450db5eec5fb278508ce97c08c680f826cb..60bd45c256f0d71887d172598744ab79dc70d389 100644
--- a/tests/common/test_run/distr_normal_diag_logprob_run.py
+++ b/tests/common/test_run/distr_normal_diag_logprob_run.py
@@ -28,7 +28,7 @@ def logprob_run(shape, dtype, kernelname="", attrs = None):

     mod = utils.op_build_test(logprob_op, [x.shape, mean.shape, scale.shape],
                               [dtype, dtype, dtype], kernel_name=kernelname,
-                              op_attrs=[], attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=[], attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     output = utils.mod_launch(mod, [x, mean, scale, output], expect = expect)

     return (x, mean, scale), output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
diff --git a/tests/common/test_run/distr_normal_diag_sample_ad_run.py b/tests/common/test_run/distr_normal_diag_sample_ad_run.py
index ab035224f1f87534881bc2d86fed1c430cad6b26..318db778fc6770c6ca0d30d3209b3465c4900ec9 100644
--- a/tests/common/test_run/distr_normal_diag_sample_ad_run.py
+++ b/tests/common/test_run/distr_normal_diag_sample_ad_run.py
@@ -24,7 +24,7 @@ def sample_ad_run(shape, dtype, kernel_name="", attrs=None):

     mod = utils.op_build_test(distr_normal_diag_sample_ad.normal_diag_sample_ad, [head.shape, mean.shape, scale.shape, eps.shape],
                               [dtype, dtype, dtype, dtype], kernel_name=kernel_name,
-                              op_attrs=None, attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=None, attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     outputs = utils.mod_launch(mod, [head, mean, scale, eps, *outputs], outputs=tuple(range(-len(outputs), 0)), expect=expects)
     outputs = list(outputs)
diff --git a/tests/common/test_run/distr_normal_diag_sample_run.py b/tests/common/test_run/distr_normal_diag_sample_run.py
index 4154796615959348bb58b1f005847586f20a96d4..c321ff671afad1debd5a6bfb5b28447be883f221 100644
--- a/tests/common/test_run/distr_normal_diag_sample_run.py
+++ b/tests/common/test_run/distr_normal_diag_sample_run.py
@@ -26,7 +26,7 @@ def sample_run(shape, dtype, kernel_name="", attrs=None):

     mod = utils.op_build_test(sample_op, [mean.shape, scale.shape, eps.shape],
                               [dtype, dtype, dtype], kernel_name=kernel_name,
-                              op_attrs=None, attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=None, attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     output = utils.mod_launch(mod, [mean, scale, eps, output], expect=expect)

     return (mean, scale, eps), output, expect, compare_tensor(output, expect, rtol=5e-03, atol=0.1, equal_nan=True)
diff --git a/tests/common/test_run/distr_normal_prob_regr_train_run.py b/tests/common/test_run/distr_normal_prob_regr_train_run.py
index 3fd9167760e1a49b577d2b1ac8bd2d59de0c95ec..d6b30b1ca1e063eec367873c451f9f46a1b7da5d 100644
--- a/tests/common/test_run/distr_normal_prob_regr_train_run.py
+++ b/tests/common/test_run/distr_normal_prob_regr_train_run.py
@@ -25,7 +25,7 @@ def prob_regression_run(shape, dtype, kernel_name, attrs):

     mod = utils.op_build_test(distr_normal_prob_regr_train.prob_regression_train, [x.shape, w.shape, y.shape],
                               [dtype, dtype, dtype], kernel_name=kernel_name,
-                              op_attrs=[], attrs=None, log_cce=True, dump_cce=True, polyhedral=True)
+                              op_attrs=[], attrs=None, log_cce=True, dump_code=True, polyhedral=True)
     output = utils.mod_launch(mod, [x, w, y, output], expect=expect)

     return (x, w, y), output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
diff --git a/tests/common/test_run/dropout_run.py b/tests/common/test_run/dropout_run.py
index b1b85fcb40cf5834a8a63e96374611c24658ae11..0741f6078caf35fe847dd7aa67753af19d721ba6 100644
--- a/tests/common/test_run/dropout_run.py
+++ b/tests/common/test_run/dropout_run.py
@@ -83,7 +83,7 @@ def dropout_execute(shape_tensor, keep_prob, dtype, kernel_name, attrs=None):
         output = utils.mod_launch(mod, (input, mask, output), expect=expect)

         source_code = mod.imported_modules[0].get_source()
-        utils.create_cce(kernel_name, "./", source_code)
+        utils.create_code(kernel_name, "./", source_code)

         rtol, atol = get_rtol_atol("dropout", dtype)
         return (input, mask), output, expect, compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=True)
diff --git a/tests/common/test_run/kldiv_loss_grad_run.py b/tests/common/test_run/kldiv_loss_grad_run.py
index bd4268e01b907c9c8f3ba480da169e207df22710..deee6deee8bad939d2fecdb63a5f7cbfc7dd8687 100644
--- a/tests/common/test_run/kldiv_loss_grad_run.py
+++ b/tests/common/test_run/kldiv_loss_grad_run.py
@@ -28,7 +28,7 @@ def kldiv_loss_grad_run(shape, dtype, kernel_name="kldiv_loss_grad", attrs=None)
         t = attrs.get("tuning", False)
         kernel_name = attrs.get("kernel_name", False)
         mod = utils.op_build_test(kldiv_loss_grad.kldiv_loss_grad, [shape, shape, shape], [dtype, dtype, dtype],
-                                  kernel_name=kernel_name, attrs=attrs, dump_cce=True, tuning=t)
+                                  kernel_name=kernel_name, attrs=attrs, dump_code=True, tuning=t)
         if t:
             cur_deriv, output, pre_deriv, prediction, target = gen_data(attrs, dtype, shape)
             return mod, cur_deriv, (pre_deriv, prediction, target, output)
@@ -36,7 +36,7 @@ def kldiv_loss_grad_run(shape, dtype, kernel_name="kldiv_loss_grad", attrs=None)
             return mod
     else:
         mod = utils.op_build_test(kldiv_loss_grad.kldiv_loss_grad, [shape, shape, shape], [dtype, dtype, dtype],
-                                  kernel_name=kernel_name, attrs=attrs, dump_cce=True)
+                                  kernel_name=kernel_name, attrs=attrs, dump_code=True)
         cur_deriv, output, pre_deriv, prediction, target = gen_data(attrs, dtype, shape)
         output = utils.mod_launch(mod, (pre_deriv, prediction, target, output), expect=cur_deriv)
         return (pre_deriv, prediction, target), output, cur_deriv, compare_tensor(output, cur_deriv, rtol=0.005,
diff --git a/tests/common/test_run/l1_loss_grad_run.py b/tests/common/test_run/l1_loss_grad_run.py
index c1c4f4c3bfc01ee030331a1ac28bd25667b6e8cf..d1bcf62816ff35f6a7778076e7cbc3c257bd0b41 100644
--- a/tests/common/test_run/l1_loss_grad_run.py
+++ b/tests/common/test_run/l1_loss_grad_run.py
@@ -28,7 +28,7 @@ def l1_loss_grad_run(shape, dtype, kernel_name="l1_loss_grad", attrs=None):
         t = attrs.get("tuning", False)
         kernel_name = attrs.get("kernel_name", False)
         mod = utils.op_build_test(l1_loss_grad.l1_loss_grad, [shape, shape, shape], [dtype, dtype, dtype],
-                                  kernel_name=kernel_name, attrs=attrs, dump_cce=True, tuning=t)
+                                  kernel_name=kernel_name, attrs=attrs, dump_code=True, tuning=t)
         if t:
             dloss, expect, output, prediction, target = gen_data(dtype, shape)
             return mod, expect, (dloss, prediction, target, output)
@@ -36,7 +36,7 @@ def l1_loss_grad_run(shape, dtype, kernel_name="l1_loss_grad", attrs=None):
             return mod
     else:
         mod = utils.op_build_test(l1_loss_grad.l1_loss_grad, [shape, shape, shape], [dtype, dtype, dtype],
-                                  kernel_name=kernel_name, attrs=attrs, dump_cce=True)
+                                  kernel_name=kernel_name, attrs=attrs, dump_code=True)
         dloss, expect, output, prediction, target = gen_data(dtype, shape)
         output = utils.mod_launch(mod, (dloss, prediction, target, output), expect=expect)
         return (dloss, prediction, target), output, expect, compare_tensor(output, expect, rtol=0.001, atol=0.001)
diff --git a/tests/common/test_run/matmul_run_mansch.py b/tests/common/test_run/matmul_run_mansch.py
index 3775eaec08b5b95f68dab528ca8f06a22abb8434..9e52d9e90cef2380a10e43ca62c9d49b1ee4ae5d 100644
--- a/tests/common/test_run/matmul_run_mansch.py
+++ b/tests/common/test_run/matmul_run_mansch.py
@@ -45,7 +45,7 @@ def matmul_run_mansch(MatrixShape, l1_tiling, l0_tiling, kernel_name, attrs=None
     # launch the kernel
     mod = matmul_mansch.gemm_dsl(MatrixShape, l1_tiling, l0_tiling, kernel_name)
     source_code = mod.imported_modules[0].get_source()
-    utils.create_cce(kernel_name, ".", source_code)
+    utils.create_code(kernel_name, ".", source_code)
     res = utils.mod_launch(mod, [A, B, out_data])

     # transform numpy data to compute benchMark
diff --git a/tests/common/test_run/maxpool_ad_run.py b/tests/common/test_run/maxpool_ad_run.py
index 149078320273c3bb9cd0d037aaaa8789306f5092..805f5d9c365e74ea50731634411ffbcd3955449a 100644
--- a/tests/common/test_run/maxpool_ad_run.py
+++ b/tests/common/test_run/maxpool_ad_run.py
@@ -44,14 +44,14 @@ def maxpool_ad_run(shape, kernel, stride, pad, dtype, optimized, polyhedral=Fals
             else:
                 mod = utils.op_build_test(maxpool_ad_no_custom_diff_poly_all_max, [head.shape, shape], [dtype, dtype],
                                           kernel_name="maxpool_ad_no_custom_diff_poly_all_max",
-                                          op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_cce=True, polyhedral=polyhedral)
+                                          op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_code=True, polyhedral=polyhedral)
                 output = utils.mod_launch(mod, [head, input, output], expect=expect)
     else:
         if optimized:
             if first_max:
                 mod = utils.op_build_test(maxpool_ad, [head.shape, shape, forward.shape, mask.shape], [dtype, dtype, dtype, dtype],
                                           kernel_name="maxpool_ad_first_max",
-                                          op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_cce=True, polyhedral=polyhedral)
+                                          op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_code=True, polyhedral=polyhedral)
                 output = utils.mod_launch(mod, [head, input, forward, mask, output], expect=expect)
             else:
                 mod = maxpool_ad_manual_schedule_all_max(shape, kernel, stride, pad, dtype, attrs=attrs, polyhedral=polyhedral)
@@ -62,7 +62,7 @@ def maxpool_ad_run(shape, kernel, stride, pad, dtype, optimized, polyhedral=Fals
             else:
                 mod = utils.op_build_test(maxpool_ad_no_custom_diff_manual_schedule_all_max, [head.shape, shape], [dtype, dtype],
                                           kernel_name="maxpool_ad_no_custom_diff_manual_schedule_all_max",
-                                          op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_cce=True, polyhedral=polyhedral)
+                                          op_attrs=[kernel, stride, pad], attrs=attrs, log_cce=False, dump_code=True, polyhedral=polyhedral)
                 output = utils.mod_launch(mod, [head, input, output], expect=expect)

     if 'tuning' in attrs.keys():
diff --git a/tests/common/test_run/maxpool_grad_run.py b/tests/common/test_run/maxpool_grad_run.py
index 5d02eb8bd9e2260d824c6cb262b472b8b76952c3..2a5c95f8c20b22a4cf7d991a1c43cd08215eaa33 100644
--- a/tests/common/test_run/maxpool_grad_run.py
+++ b/tests/common/test_run/maxpool_grad_run.py
@@ -100,7 +100,7 @@ def maxpool_grad_run(shape, kernel, stride, pad, dtype, attrs):
         mod = utils.op_build_test(maxpool_grad.maxpool_grad, [shape, y_shape, y_shape],
                                   [dtype, dtype, dtype], op_attrs=[kernel, stride, pad],
-                                  kernel_name=kernel_name, attrs=attrs, dump_cce=True, tuning=t)
+                                  kernel_name=kernel_name, attrs=attrs, dump_code=True, tuning=t)
         if t:
             dy, expect, output, x, y = \
                 gen_data(dtype, kernel, pad, shape, stride, y_shape)
@@ -111,7 +111,7 @@ def maxpool_grad_run(shape, kernel, stride, pad, dtype, attrs):
         mod = utils.op_build_test(maxpool_grad.maxpool_grad, [shape, y_shape, y_shape],
                                   [dtype, dtype, dtype], op_attrs=[kernel, stride, pad],
-                                  kernel_name='maxpool_grad', attrs=attrs, dump_cce=True)
+                                  kernel_name='maxpool_grad', attrs=attrs, dump_code=True)
         dy, expect, output, x, y = \
             gen_data(dtype, kernel, pad, shape, stride, y_shape)
         output = utils.mod_launch(mod, (x, y, dy, output), expect=expect)
diff --git a/tests/common/test_run/maxpool_grad_with_argmax_run.py b/tests/common/test_run/maxpool_grad_with_argmax_run.py
index 93f884e0b9552c7d7453c5ba54e9ca721d48de6f..b456bd7d36b0321833df1f8a3a30620a6fbdac04 100644
--- a/tests/common/test_run/maxpool_grad_with_argmax_run.py
+++ b/tests/common/test_run/maxpool_grad_with_argmax_run.py
@@ -31,7 +31,7 @@ def maxpool_grad_with_argmax_run(shape, kernel, stride, pad, dtype, polyhedral=F
         mod = utils.op_build_test(maxpool_grad_with_argmax, [head.shape, mask.shape], [dtype, dtype],
                                   kernel_name="maxpool_grad_with_argmax", op_attrs=[shape, kernel, stride, pad], attrs=attrs,
-                                  log_cce=False, dump_cce=True, polyhedral=polyhedral)
+                                  log_cce=False, dump_code=True, polyhedral=polyhedral)
         if t:
             return mod, expect, (head, mask, output)
         else:
@@ -43,7 +43,7 @@ def maxpool_grad_with_argmax_run(shape, kernel, stride, pad, dtype, polyhedral=F
         mod = utils.op_build_test(maxpool_grad_with_argmax, [head.shape, mask.shape], [dtype, dtype],
                                   kernel_name="maxpool_grad_with_argmax", op_attrs=[shape, kernel, stride, pad], attrs=attrs,
-                                  log_cce=False, dump_cce=True, polyhedral=polyhedral)
+                                  log_cce=False, dump_code=True, polyhedral=polyhedral)
         output = utils.mod_launch(mod, [head, mask, output], expect=expect)

     rtol, atol = get_rtol_atol("maxpool_grad_with_argmax", dtype)
diff --git a/tests/common/test_run/nms_run.py b/tests/common/test_run/nms_run.py
index d5e746072ddfc9f2b5a06911b4f334fc5ad58006..e35a438f0af14676fe275ae175f1ec4e63735376 100644
--- a/tests/common/test_run/nms_run.py
+++ b/tests/common/test_run/nms_run.py
@@ -94,6 +94,6 @@ def nms_run(shape_tensor, thres, dtype, kernel_name, attrs):
     output = utils.mod_launch(mod, (anchor, output), expect=expect)
     output = np.frombuffer(output.tobytes(), np.uint16).reshape(out_shape)
     source_code = mod.imported_modules[0].get_source()
-    utils.create_cce(kernel_name, "./", source_code)
+    utils.create_code(kernel_name, "./", source_code)
     expect = np.frombuffer(expect.tobytes(), np.uint16).reshape(out_shape)
     return anchor, output, expect, np.all(output == expect)
diff --git a/tests/common/test_run/roipool_run.py b/tests/common/test_run/roipool_run.py
index cecebf4c71136f534d0d6bb10e5305b9a6671207..954ade403e3148134397f3b7f5261833d9a8d15a 100644
--- a/tests/common/test_run/roipool_run.py
+++ b/tests/common/test_run/roipool_run.py
@@ -32,7 +32,7 @@ def roipool_run(shape, roibox, pooled_shape, dtype, attrs, cce_path="./"):
         expect = roipool_expect(input1, shape, roibox, pooled_shape)

         # source_code = mod.imported_modules[0].get_source()
-        # utils.create_cce(kernel_name, cce_path, source_code)
+        # utils.create_code(kernel_name, cce_path, source_code)

         output = np.full(output_shape, np.nan, dtype)
         output = utils.mod_launch(mod, (input1, output), expect=expect)
kernel_name="smooth_l1_los kernel_name = attrs.get("kernel_name", False) mod = utils.op_build_test(smooth_l1_loss_grad.smooth_l1_loss_grad, [sample_shape, shape, shape, sample_shape], [dtype, dtype, dtype, anchor_samples_dtype], op_attrs=[sigma, anchor_sample_correct], - attrs=attrs, kernel_name=kernel_name, dump_cce=True, tuning=t) + attrs=attrs, kernel_name=kernel_name, dump_code=True, tuning=t) if t: anchor_samples, dloss, expect, output, prediction, prediction_, target, target_ = gen_data( anchor_sample_correct, anchor_samples_dtype, dtype, sample_shape, shape, sigma) @@ -50,7 +50,7 @@ def smooth_l1_loss_grad_run(shape, dtype, attrs=None, kernel_name="smooth_l1_los mod = utils.op_build_test(smooth_l1_loss_grad.smooth_l1_loss_grad, [sample_shape, shape, shape, sample_shape], [dtype, dtype, dtype, anchor_samples_dtype], op_attrs=[sigma, anchor_sample_correct], - attrs=attrs, kernel_name=kernel_name, dump_cce=True) + attrs=attrs, kernel_name=kernel_name, dump_code=True) output = utils.mod_launch(mod, (dloss, prediction, target, anchor_samples, output), expect=expect) return (dloss, prediction, target, anchor_samples), output, expect, compare_tensor(output, expect, atol=5e-3, rtol=5e-3) diff --git a/tests/common/test_run/square_difference_run.py b/tests/common/test_run/square_difference_run.py index 4b5430f02e84994e079926cc1055733d0f27be6e..88880a859c8b35655ea277e0d87ed2a662243daa 100644 --- a/tests/common/test_run/square_difference_run.py +++ b/tests/common/test_run/square_difference_run.py @@ -35,7 +35,7 @@ def square_difference_run(shape1, shape2, dtype, kernel_name, attrs, cce_path=". input_types=[dtype, dtype], kernel_name=kernel_name, attrs=attrs) expect, input1, input2, output = gen_data(dtype, shape1, shape2) source_code = mod.imported_modules[0].get_source() - utils.create_cce(kernel_name, cce_path, source_code) + utils.create_code(kernel_name, cce_path, source_code) output = utils.mod_launch(mod, (input1, input2, output), expect=expect) return (input1, input2), output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True) diff --git a/tests/common/test_run/strided_slice_grad_run.py b/tests/common/test_run/strided_slice_grad_run.py index 37b8564f68b1d2c2e6ee37284e40f6756c2306ae..f21a8acaca166600db83a391ef3258b3d4670d69 100644 --- a/tests/common/test_run/strided_slice_grad_run.py +++ b/tests/common/test_run/strided_slice_grad_run.py @@ -81,7 +81,7 @@ def gen_data(begin, begin_mask, dtype, ellipsis_mask, end, end_mask, grad_shape, # source_code = mod.imported_modules[0].get_source() # print(source_code) # kernel_name = "cce_strided_slice_grad_fp16" - # utils.create_cce(kernel_name, './', source_code) + # utils.create_code(kernel_name, './', source_code) out_shape = input_shape output = np.full(out_shape, 0, dtype) return expect, grad, output diff --git a/tests/common/test_run/truncatemod_run.py b/tests/common/test_run/truncatemod_run.py index b3ce16a1bb79651ebfaf480ec53e76dec32a7357..1c2d2d3cf5b1d5868ecf0db1f97ce9e33cb66b8c 100644 --- a/tests/common/test_run/truncatemod_run.py +++ b/tests/common/test_run/truncatemod_run.py @@ -26,7 +26,7 @@ def truncatemod_run(shape1, shape2, dtype, attrs): t = attrs.get("tuning", False) kernel_name = attrs.get("kernel_name", False) mod = utils.op_build_test(truncatemod.truncatemod, [shape1, shape2], [dtype, dtype], kernel_name=kernel_name, - attrs=attrs, dump_cce=True, tuning=t) + attrs=attrs, dump_code=True, tuning=t) if t: expect, input1, input2, output = gen_data(dtype, shape1, shape2) return mod, expect, (input1, input2, output) @@ 
@@ -35,7 +35,7 @@ def truncatemod_run(shape1, shape2, dtype, attrs):
     else:
         expect, input1, input2, output = gen_data(dtype, shape1, shape2)
         mod = utils.op_build_test(truncatemod.truncatemod, [shape1, shape2], [dtype, dtype], kernel_name="truncatemod",
-                                  attrs=attrs, dump_cce=True)
+                                  attrs=attrs, dump_code=True)
         output = utils.mod_launch(mod, (input1, input2, output), expect=expect)
         rtol, atol = get_rtol_atol("truncatemod", dtype)
         res = compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=True)
diff --git a/tests/common/test_run/vector_matmul_run.py b/tests/common/test_run/vector_matmul_run.py
index cb95dd3142c6c1a1070127a06c2da3eded62a747..8a504a829ffc05c07637092fabef53b45b84adfc 100644
--- a/tests/common/test_run/vector_matmul_run.py
+++ b/tests/common/test_run/vector_matmul_run.py
@@ -145,7 +145,7 @@ def vector_matmul_run(case_index, m, n, k, trans_a, trans_b, read_data, dump_dat
     # k = (k+15)//16*16

     mod, out_shape = vector_matmul.vector_matmul(m, n, k, trans_a, trans_b, dtype, kernel_name, attrs)
-    utils.create_cce(kernel_name, "./", mod.imported_modules[0].get_source())
+    utils.create_code(kernel_name, "./", mod.imported_modules[0].get_source())

     # Generate data
     m_a, m_b, bench_mark = vector_matmul_data(case_index, m, n, k, trans_a, trans_b, read_data, dump_data, dtype)
diff --git a/tests/common/test_run/winograd_ad_run.py b/tests/common/test_run/winograd_ad_run.py
index a32670f0c0b53f7c075c55251aac5cda729ea480..2621c25fe88560a515ad60cbde5e6069f8a80acc 100644
--- a/tests/common/test_run/winograd_ad_run.py
+++ b/tests/common/test_run/winograd_ad_run.py
@@ -35,7 +35,7 @@ def winograd_ad_run(filter_shape, tile, dtype, attrs):
         t = attrs.get("tuning", False)
         kernel_name = attrs.get("kernel_name", False)
         mod = utils.op_build_test(winograd_ad, [head_np.shape, filter_shape], [dtype, dtype], kernel_name=kernel_name,
-                                  attrs=attrs, log_cce=True, dump_cce=True, tuning=t)
+                                  attrs=attrs, log_cce=True, dump_code=True, tuning=t)
         if t:
             expect, input_np, output = gen_data(filter_shape, RANGEFILL, dtype)
             return mod, expect, (head_np, input_np, output)
@@ -45,7 +45,7 @@ def winograd_ad_run(filter_shape, tile, dtype, attrs):
     # scenario 1:
     expect, input_np, output = gen_data(filter_shape, RANGEFILL, dtype)
     mod = utils.op_build_test(winograd_ad, [head_np.shape, filter_shape], [dtype, dtype], kernel_name="winograd_ad",
-                              attrs=attrs, log_cce=True, dump_cce=True)
+                              attrs=attrs, log_cce=True, dump_code=True)
     output = utils.mod_launch(mod, [head_np, input_np, output], expect=expect)
     if not compare_tensor(output, expect, atol=0.1):
         return [head_np, input_np], output, expect, compare_tensor(output, expect, rtol=5e-03, atol=5e-03,
diff --git a/tests/operators/cube/quant_conv.py b/tests/operators/cube/quant_conv.py
index eafe126ad6d45c909e91faa72535fcb614b78ba5..9cd4b604fb9a5901a6e3513bf9aa9f4722aad1b3 100644
--- a/tests/operators/cube/quant_conv.py
+++ b/tests/operators/cube/quant_conv.py
@@ -403,7 +403,7 @@ def test_CCE_Conv(fmap_shape, filter_shape, pad_, stride_,
     mod = akg.build(s, [A, B, ScaleQ, OffsetQ, out], "cce", name=kernel_name, attrs={"dim": info}, polyhedral=True)
     source_code = mod.imported_modules[0].get_source()
     # print(source_code)
-    # utils.create_cce(kernel_name, cce_path, source_code)
+    # utils.create_code(kernel_name, cce_path, source_code)
     if run_cce:
         run_conv(mod, fmap_shape, filter_shape, pad_[0], stride_[0], use_bias)