diff --git a/ai/BUILD.gn b/ai/BUILD.gn index 8dfd3c108f62741c2a92e34ecda6524d8202d93d..87f0e500961fee014cef972903c88d53901ff165 100644 --- a/ai/BUILD.gn +++ b/ai/BUILD.gn @@ -14,4 +14,7 @@ group("ai") { testonly = true deps = [ "neural_network_runtime:neural_network_runtime" ] + if (is_standard_system) { + deps += [ "mindspore:ActsMindSporeTest" ] + } } diff --git a/ai/mindspore/BUILD.gn b/ai/mindspore/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..0b71aece9507112fdb5025ac1bfba8356b44be62 --- /dev/null +++ b/ai/mindspore/BUILD.gn @@ -0,0 +1,36 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//test/xts/tools/build/suite.gni") + +module_output_path = "acts/ActsMindSporeTest" + +ohos_moduletest_suite("ActsMindSporeTest") { + module_out_path = module_output_path + sources = [ + "src/ohos_c_api_test_mslite.cpp", + "src/ohos_common.cpp", + ] + + include_dirs = [ "//foundation/ai/mindspore/" ] + + deps = [ + "//third_party/googletest:gmock", + "//third_party/googletest:gtest", + "//third_party/mindspore/mindspore/lite:mindspore_lib", + ] + + defines = [ "__STDC_FORMAT_MACROS" ] + subsystem_name = "ai" + part_name = "mindspore" +} diff --git a/ai/mindspore/Test.json b/ai/mindspore/Test.json new file mode 100644 index 0000000000000000000000000000000000000000..47e92792dba8ca113b211386c0c2be51fc7b0c1a --- /dev/null +++ b/ai/mindspore/Test.json @@ -0,0 +1,44 @@ +{ + "description": "Config for MindSpore test cases", + "driver": { + "module-name": "ActsMindSporeTest", + "native-test-timeout": "120000", + "native-test-device-path": "/data/local/tmp", + "runtime-hint": "1s", + "type": "CppTest" + }, + "kits": [ + { + "type": "ShellKit", + "run-command": [ + "remount", + "mkdir /data/test" + ] + }, + { + "type": "PushKit", + "push": [ + "ActsMindSporeTest->/data/local/tmp/ActsMindSporeTest", + "resources/ai/mindspore/aiy_vision_classifier_plants_V1_3/aiy_vision_classifier_plants_V1_3.ms -> /data/test", + "resources/ai/mindspore/aiy_vision_classifier_plants_V1_3/aiy_vision_classifier_plants_V1_3.input -> /data/test", + "resources/ai/mindspore/aiy_vision_classifier_plants_V1_3/aiy_vision_classifier_plants_V1_30.output -> /data/test", + "resources/ai/mindspore/ml_face_is_face/ml_face_isface.ms -> /data/test", + "resources/ai/mindspore/ml_face_is_face/ml_face_isfacer13.ms -> /data/test", + "resources/ai/mindspore/ml_face_is_face/ml_face_isface.input -> /data/test", + "resources/ai/mindspore/ml_face_is_face/ml_face_isface_0.input -> /data/test", + "resources/ai/mindspore/ml_face_is_face/ml_face_isface0.output -> /data/test", + "resources/ai/mindspore/ml_Hand_deploy/ml_Hand_deploy.ms -> /data/test", + "resources/ai/mindspore/ml_Hand_deploy/ml_Hand_deploy_0.input -> /data/test", + "resources/ai/mindspore/ml_Hand_deploy/ml_Hand_deploy0.output -> /data/test", + "resources/ai/mindspore/ml_ocr_cn/ml_ocr_cn_0.input -> /data/test", + "resources/ai/mindspore/ml_ocr_cn/ml_ocr_cn.ms -> /data/test", + 
"resources/ai/mindspore/ml_ocr_cn/ml_ocr_cn0.output -> /data/test", + "resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite.ms -> /data/test", + "resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite_0.input -> /data/test", + "resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite_1.input -> /data/test", + "resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite_2.input -> /data/test", + "resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite0.output -> /data/test" + ] + } +] +} diff --git a/ai/mindspore/src/ohos_c_api_test_mslite.cpp b/ai/mindspore/src/ohos_c_api_test_mslite.cpp new file mode 100644 index 0000000000000000000000000000000000000000..83b782c982afe82efa0853403359bb840d3cf0e7 --- /dev/null +++ b/ai/mindspore/src/ohos_c_api_test_mslite.cpp @@ -0,0 +1,1576 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include "ohos_common.h" +#include "gtest/gtest.h" +#include "include/c_api/context_c.h" +#include "include/c_api/model_c.h" +#include "include/c_api/types_c.h" +#include "include/c_api/status_c.h" +#include "include/c_api/data_type_c.h" +#include "include/c_api/tensor_c.h" +#include "include/c_api/format_c.h" + +using namespace testing::ext; + +class MSLiteTest: public testing::Test { +protected: + static void SetUpTestCase(void) {} + static void TearDownTestCase(void) {} + virtual void SetUp() {} + virtual void TearDown() {} +}; + +// function before callback +bool PrintBeforeCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs, + const OH_AI_CallBackParam kernel_Info) { + std::cout << "Before forwarding " << kernel_Info.node_name << " " << kernel_Info.node_type << std::endl; + return true; +} + +// function after callback +bool PrintAfterCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs, + const OH_AI_CallBackParam kernel_Info) { + std::cout << "After forwarding " << kernel_Info.node_name << " " << kernel_Info.node_type << std::endl; + return true; +} + +// add cpu device info +void AddContextDeviceCPU(OH_AI_ContextHandle context) { + OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU); + ASSERT_NE(cpu_device_info, nullptr); + OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(cpu_device_info); + printf("==========device_type:%d\n", device_type); + ASSERT_EQ(device_type, OH_AI_DEVICETYPE_CPU); + OH_AI_ContextAddDeviceInfo(context, cpu_device_info); +} + +// add gpu device info +void AddContextDeviceGPU(OH_AI_ContextHandle context) { + OH_AI_DeviceInfoHandle gpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_GPU); + ASSERT_NE(gpu_device_info, nullptr); + OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(gpu_device_info); + printf("==========device_type:%d\n", device_type); + ASSERT_EQ(device_type, OH_AI_DEVICETYPE_GPU); + OH_AI_ContextAddDeviceInfo(context, gpu_device_info); +} + +// fill data to inputs 
+void FillInputsData(OH_AI_TensorHandleArray inputs, string model_name, bool is_transpose) {
+    for (size_t i = 0; i < inputs.handle_num; ++i) {
+        printf("==========ReadFile==========\n");
+        size_t size1;
+        size_t *ptr_size1 = &size1;
+        string input_data_path = "/data/test/" + model_name + "_" + std::to_string(i) + ".input";
+        const char *imagePath = input_data_path.c_str();
+        char *imageBuf = ReadFile(imagePath, ptr_size1);
+        ASSERT_NE(imageBuf, nullptr);
+        OH_AI_TensorHandle tensor = inputs.handle_list[i];
+        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
+        printf("Tensor name: %s.\n", OH_AI_TensorGetName(tensor));
+        float *input_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(inputs.handle_list[i]));
+        ASSERT_NE(input_data, nullptr);
+        if (is_transpose) {
+            printf("==========Transpose==========\n");
+            size_t shape_num;
+            const int64_t *shape = OH_AI_TensorGetShape(tensor, &shape_num);
+            auto imageBuf_nhwc = new char[size1];
+            PackNCHWToNHWCFp32(imageBuf, imageBuf_nhwc, shape[0], shape[1] * shape[2], shape[3]);
+            memcpy(input_data, imageBuf_nhwc, size1);
+            delete[] imageBuf_nhwc;
+        } else {
+            memcpy(input_data, imageBuf, size1);
+        }
+        printf("input data after filling is: ");
+        for (int j = 0; j < element_num && j <= 20; ++j) {
+            printf("%f ", input_data[j]);
+        }
+        printf("\n");
+        delete[] imageBuf;
+    }
+}
+
+// compare result after predict
+void CompareResult(OH_AI_TensorHandleArray outputs, string model_name) {
+    printf("==========GetOutput==========\n");
+    for (size_t i = 0; i < outputs.handle_num; ++i) {
+        OH_AI_TensorHandle tensor = outputs.handle_list[i];
+        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
+        printf("Tensor name: %s.\n", OH_AI_TensorGetName(tensor));
+        float *output_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(tensor));
+        printf("output data is:");
+        for (int j = 0; j < element_num && j <= 20; ++j) {
+            printf("%f ", output_data[j]);
+        }
+        printf("\n");
+        printf("==========compFp32WithTData==========\n");
+        string output_file = "/data/test/" + model_name + std::to_string(i) + ".output";
+        bool result = compFp32WithTData(output_data, output_file, 0.01, 0.01, false);
+        EXPECT_EQ(result, true);
+    }
+}
+
+// model build and predict
+void ModelPredict(OH_AI_ModelHandle model, OH_AI_ContextHandle context, string model_name,
+                  OH_AI_ShapeInfo shape_infos, bool build_by_graph, bool is_transpose, bool is_callback) {
+    string model_path = "/data/test/" + model_name + ".ms";
+    const char *graphPath = model_path.c_str();
+    OH_AI_Status ret = OH_AI_STATUS_SUCCESS;
+    if (build_by_graph) {
+        printf("==========Build model by graphBuf==========\n");
+        size_t size;
+        size_t *ptr_size = &size;
+        char *graphBuf = ReadFile(graphPath, ptr_size);
+        ASSERT_NE(graphBuf, nullptr);
+        ret = OH_AI_ModelBuild(model, graphBuf, size, OH_AI_MODELTYPE_MINDIR, context);
+    } else {
+        printf("==========Build model==========\n");
+        ret = OH_AI_ModelBuildFromFile(model, graphPath, OH_AI_MODELTYPE_MINDIR, context);
+    }
+    printf("==========build model return code:%d\n", ret);
+    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
+    printf("==========GetInputs==========\n");
+    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
+    ASSERT_NE(inputs.handle_list, nullptr);
+    if (shape_infos.shape_num != 0) {
+        printf("==========Resizes==========\n");
+        OH_AI_Status resize_ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num);
+        printf("==========Resizes return code:%d\n", resize_ret);
+        ASSERT_EQ(resize_ret, OH_AI_STATUS_SUCCESS);
+    }
+    FillInputsData(inputs, model_name, is_transpose);
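+    // Run inference: when is_callback is true, the per-node before/after callbacks are passed to
+    // OH_AI_ModelPredict so each kernel is logged as it executes; the outputs are then compared
+    // against the expected .output file and the model handle is destroyed.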
OH_AI_TensorHandleArray outputs; + OH_AI_Status predict_ret = OH_AI_STATUS_SUCCESS; + if (is_callback) { + printf("==========Model Predict Callback==========\n"); + OH_AI_KernelCallBack before_call_back = PrintBeforeCallback; + OH_AI_KernelCallBack after_call_back = PrintAfterCallback; + predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, before_call_back, after_call_back); + }else { + printf("==========Model Predict==========\n"); + predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr); + } + ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS); + CompareResult(outputs, model_name); + OH_AI_ModelDestroy(&model); +} + +// predict on cpu +void Predict_CPU() { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,默认场景,不设置线程绑核 +HWTEST(MSLiteTest, OHOS_Context_CPU_0001, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,4线程 +HWTEST(MSLiteTest, OHOS_Context_CPU_0002, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 4); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 4); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,2线程 +HWTEST(MSLiteTest, OHOS_Context_CPU_0003, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 2); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 2); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,1线程 +HWTEST(MSLiteTest, OHOS_Context_CPU_0004, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 1); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 1); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 异常场景:Context设置CPU,0线程 +HWTEST(MSLiteTest, 
OHOS_Context_CPU_0005, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 0); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 0); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_NOT_SUPPORT); + OH_AI_ModelDestroy(&model); +} + +// 正常场景:Context设置CPU,不绑核 +HWTEST(MSLiteTest, OHOS_Context_CPU_0006, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 4); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 4); + OH_AI_ContextSetThreadAffinityMode(context, 0); + int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context); + printf("==========thread_affinity_mode:%d\n", thread_affinity_mode); + ASSERT_EQ(thread_affinity_mode, 0); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,绑大核 +HWTEST(MSLiteTest, OHOS_Context_CPU_0007, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 4); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 4); + OH_AI_ContextSetThreadAffinityMode(context, 1); + int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context); + printf("==========thread_affinity_mode:%d\n", thread_affinity_mode); + ASSERT_EQ(thread_affinity_mode, 1); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,绑中核 +HWTEST(MSLiteTest, OHOS_Context_CPU_0008, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 4); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 4); + OH_AI_ContextSetThreadAffinityMode(context, 2); + int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context); + printf("==========thread_affinity_mode:%d\n", thread_affinity_mode); + ASSERT_EQ(thread_affinity_mode, 2); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 异常场景:Context设置CPU,绑核失败 +HWTEST(MSLiteTest, 
OHOS_Context_CPU_0009, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 4); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 4); + OH_AI_ContextSetThreadAffinityMode(context, 3); + int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context); + printf("==========thread_affinity_mode:%d\n", thread_affinity_mode); + ASSERT_EQ(thread_affinity_mode, 3); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_NULLPTR); + OH_AI_ModelDestroy(&model); +} + +// 正常场景:Context设置CPU,绑核列表{0,1,2,3} +HWTEST(MSLiteTest, OHOS_Context_CPU_0010, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 4); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 4); + constexpr size_t core_num = 4; + int32_t core_list[core_num] = {0, 1, 2, 3}; + OH_AI_ContextSetThreadAffinityCoreList(context, core_list, core_num); + size_t ret_core_num; + const int32_t *ret_core_list = nullptr; + ret_core_list = OH_AI_ContextGetThreadAffinityCoreList(context, &ret_core_num); + ASSERT_EQ(ret_core_num, core_num); + for (size_t i = 0; i < ret_core_num; i++) { + printf("==========ret_core_list:%d\n", ret_core_list[i]); + ASSERT_EQ(ret_core_list[i], core_list[i]); + } + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,绑核列表和模式同时开启 +HWTEST(MSLiteTest, OHOS_Context_CPU_0011, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 4); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 4); + OH_AI_ContextSetThreadAffinityMode(context, 1); + int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context); + printf("==========thread_affinity_mode:%d\n", thread_affinity_mode); + constexpr size_t core_num = 4; + int32_t core_list[core_num] = {0, 1, 3, 4}; + OH_AI_ContextSetThreadAffinityCoreList(context, core_list, core_num); + size_t ret_core_num; + const int32_t *ret_core_list = nullptr; + ret_core_list = OH_AI_ContextGetThreadAffinityCoreList(context, &ret_core_num); + ASSERT_EQ(ret_core_num, core_num); + for (size_t i = 0; i < ret_core_num; i++) { + printf("==========ret_core_list:%d\n", ret_core_list[i]); + ASSERT_EQ(ret_core_list[i], core_list[i]); + } + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, 
false); +} + +// 正常场景:Context设置CPU,开启并行 +HWTEST(MSLiteTest, OHOS_Context_CPU_0012, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 4); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 4); + OH_AI_ContextSetEnableParallel(context, true); + bool is_parallel = OH_AI_ContextGetEnableParallel(context); + printf("==========is_parallel:%d\n", is_parallel); + ASSERT_EQ(is_parallel, true); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,关闭并行 +HWTEST(MSLiteTest, OHOS_Context_CPU_0013, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_ContextSetThreadNum(context, 4); + int32_t thread_num = OH_AI_ContextGetThreadNum(context); + printf("==========thread_num:%d\n", thread_num); + ASSERT_EQ(thread_num, 4); + OH_AI_ContextSetEnableParallel(context, false); + bool is_parallel = OH_AI_ContextGetEnableParallel(context); + printf("==========is_parallel:%d\n", is_parallel); + ASSERT_EQ(is_parallel, false); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,开启fp16 +HWTEST(MSLiteTest, OHOS_Context_CPU_0014, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU); + ASSERT_NE(cpu_device_info, nullptr); + OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, true); + bool is_fp16 = OH_AI_DeviceInfoGetEnableFP16(cpu_device_info); + printf("==========is_fp16:%d\n", is_fp16); + ASSERT_EQ(is_fp16, true); + OH_AI_ContextAddDeviceInfo(context, cpu_device_info); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,关闭fp16 +HWTEST(MSLiteTest, OHOS_Context_CPU_0015, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU); + ASSERT_NE(cpu_device_info, nullptr); + OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, false); + bool is_fp16 = OH_AI_DeviceInfoGetEnableFP16(cpu_device_info); + printf("==========is_fp16:%d\n", is_fp16); + ASSERT_EQ(is_fp16, false); + OH_AI_ContextAddDeviceInfo(context, cpu_device_info); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,设置厂商名称 +HWTEST(MSLiteTest, OHOS_Context_CPU_0016, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = 
OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU); + ASSERT_NE(cpu_device_info, nullptr); + OH_AI_DeviceInfoSetProvider(cpu_device_info, "vendor_new"); + ASSERT_EQ(strcmp(OH_AI_DeviceInfoGetProvider(cpu_device_info), "vendor_new"), 0); + OH_AI_ContextAddDeviceInfo(context, cpu_device_info); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,设置厂商设备类型 +HWTEST(MSLiteTest, OHOS_Context_CPU_0017, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU); + ASSERT_NE(cpu_device_info, nullptr); + OH_AI_DeviceInfoSetProviderDevice(cpu_device_info, "cpu_new"); + ASSERT_EQ(strcmp(OH_AI_DeviceInfoGetProviderDevice(cpu_device_info), "cpu_new"), 0); + OH_AI_ContextAddDeviceInfo(context, cpu_device_info); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 正常场景:Context设置CPU,销毁MSDeviceInfo +HWTEST(MSLiteTest, OHOS_Context_CPU_0018, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU); + ASSERT_NE(cpu_device_info, nullptr); + OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(cpu_device_info); + printf("==========device_type:%d\n", device_type); + ASSERT_EQ(device_type, OH_AI_DEVICETYPE_CPU); + OH_AI_ContextAddDeviceInfo(context, cpu_device_info); + OH_AI_DeviceInfoDestroy(&cpu_device_info); + ASSERT_EQ(cpu_device_info, nullptr); +} + +// 正常场景:Context设置CPU,销毁OH_AI_Context +HWTEST(MSLiteTest, OHOS_Context_CPU_0019, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU); + ASSERT_NE(cpu_device_info, nullptr); + OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(cpu_device_info); + printf("==========device_type:%d\n", device_type); + ASSERT_EQ(device_type, OH_AI_DEVICETYPE_CPU); + OH_AI_ContextAddDeviceInfo(context, cpu_device_info); + OH_AI_ContextDestroy(&context); + ASSERT_EQ(context, nullptr); +} + +// 异常场景:Context不设置device info +HWTEST(MSLiteTest, OHOS_Context_CPU_0020, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_NULLPTR); + OH_AI_ModelDestroy(&model); +} + +// 正常场景:ModelBuild,调用指针方法 +HWTEST(MSLiteTest, OHOS_Model_Build_0001, Function | MediumTest | Level1) { + 
printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, true, true, false); +} + +// 异常场景:ModelBuild,model_data指向的不是模型数据 +HWTEST(MSLiteTest, OHOS_Model_Build_0002, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuild(model, imageBuf, size1, OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR); + delete[] imageBuf; + OH_AI_ModelDestroy(&model); +} + +// 异常场景:ModelBuild,model_data为空 +HWTEST(MSLiteTest, OHOS_Model_Build_0003, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuild(model, nullptr, 0, OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_NULLPTR); + OH_AI_ModelDestroy(&model); +} + +// 异常场景:ModelBuild,data_size为0 +HWTEST(MSLiteTest, OHOS_Model_Build_0004, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size; + size_t *ptr_size = &size; + const char *graphPath = "/data/test/ml_face_isface.ms"; + char *graphBuf = ReadFile(graphPath, ptr_size); + ASSERT_NE(graphBuf, nullptr); + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuild(model, graphBuf, 0, OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR); + delete[] graphBuf; + OH_AI_ModelDestroy(&model); +} + +// 异常场景:ModelBuild,读取路径方法,且路径不是模型数据路径 +HWTEST(MSLiteTest, OHOS_Model_Build_0005, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.input", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + 
ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR); + OH_AI_ModelDestroy(&model); +} + +// 异常场景:ModelBuild,读取路径方法,路径为空 +HWTEST(MSLiteTest, OHOS_Model_Build_0006, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR); + OH_AI_ModelDestroy(&model); +} + +// 异常场景:ModelBuild,model_type不支持 +HWTEST(MSLiteTest, OHOS_Model_Build_0007, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_INVALID, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_PARAM_INVALID); + OH_AI_ModelDestroy(&model); +} + +// 异常场景:ModelBuild,model_context为空 +HWTEST(MSLiteTest, OHOS_Model_Build_0008, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, nullptr); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_NULLPTR); + OH_AI_ModelDestroy(&model); +} + +// 正常场景:ModelResize,shape与之前一致 +HWTEST(MSLiteTest, OHOS_Model_Resize_0001, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_ocr_cn", {4, {1, 32, 512, 1}}, false, true, false); +} + +// 正常场景:ModelResize,shape与之前不一致 +HWTEST(MSLiteTest, OHOS_Model_Resize_0002, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_ocr_cn.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetInputs==========\n"); + OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model); + ASSERT_NE(inputs.handle_list, nullptr); + printf("==========Resizes==========\n"); + OH_AI_ShapeInfo shape_infos = 
{4, {1, 64, 256, 1}}; + OH_AI_Status resize_ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num); + printf("==========Resizes return code:%d\n", resize_ret); + ASSERT_EQ(resize_ret, OH_AI_STATUS_SUCCESS); + FillInputsData(inputs, "ml_ocr_cn", false); + OH_AI_TensorHandleArray outputs; + printf("==========Model Predict==========\n"); + OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr); + ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS); +} + +// 异常场景:ModelResize,shape为三维 +HWTEST(MSLiteTest, OHOS_Model_Resize_0003, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_ocr_cn.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetInputs==========\n"); + OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model); + ASSERT_NE(inputs.handle_list, nullptr); + printf("==========Resizes==========\n"); + OH_AI_ShapeInfo shape_infos = {4, {1, 32, 1}}; + ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num); + printf("==========Resizes return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR); + OH_AI_ModelDestroy(&model); +} + +// 异常场景:ModelResize,shape值有负数 +HWTEST(MSLiteTest, OHOS_Model_Resize_0004, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_ocr_cn.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetInputs==========\n"); + OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model); + ASSERT_NE(inputs.handle_list, nullptr); + printf("==========Resizes==========\n"); + OH_AI_ShapeInfo shape_infos = {4, {1, -32, 32, 1}}; + ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num); + printf("==========Resizes return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR); + OH_AI_ModelDestroy(&model); +} + +// 异常场景:ModelResize,不支持resize的模型 +HWTEST(MSLiteTest, OHOS_Model_Resize_0005, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetInputs==========\n"); + OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model); + ASSERT_NE(inputs.handle_list, nullptr); + 
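+    // Negative case: ml_face_isface does not support resizing, so OH_AI_ModelResize below is
+    // expected to return OH_AI_STATUS_LITE_ERROR.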
printf("==========Resizes==========\n"); + OH_AI_ShapeInfo shape_infos = {4, {1, 96, 96, 1}}; + ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num); + printf("==========Resizes return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR); + OH_AI_ModelDestroy(&model); +} + +// 正常场景:ModelPredict +HWTEST(MSLiteTest, OHOS_Model_Predict_0001, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, false); +} + +// 异常场景:ModelPredict,model被销毁 +HWTEST(MSLiteTest, OHOS_Model_Predict_0002, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetInputs==========\n"); + OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model); + ASSERT_NE(inputs.handle_list, nullptr); + FillInputsData(inputs, "ml_face_isface", true); + printf("==========Model Predict==========\n"); + OH_AI_TensorHandleArray outputs; + OH_AI_ModelDestroy(&model); + ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr); + printf("==========Model Predict return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_NULLPTR); +} + +// 异常场景:ModelPredict,input为空 +HWTEST(MSLiteTest, OHOS_Model_Predict_0003, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========Model Predict==========\n"); + OH_AI_TensorHandleArray inputs; + OH_AI_TensorHandleArray outputs; + ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr); + printf("==========Model Predict return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR); + OH_AI_ModelDestroy(&model); +} + +// 正常场景:ModelPredict,传入回调函数 +HWTEST(MSLiteTest, OHOS_Model_Predict_0004, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + ModelPredict(model, context, "ml_face_isface", {}, false, true, true); +} + +// 正常场景:ModelGetInputByTensorName +HWTEST(MSLiteTest, OHOS_Model_GetInputByTensorName_0001, Function | MediumTest | Level1) { + 
printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetInputs==========\n"); + OH_AI_TensorHandle tensor = OH_AI_ModelGetInputByTensorName(model, "data"); + ASSERT_NE(tensor, nullptr); + int64_t element_num = OH_AI_TensorGetElementNum(tensor); + printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num); + float *input_data = reinterpret_cast(OH_AI_TensorGetMutableData(tensor)); + ASSERT_NE(input_data, nullptr); + printf("==========Transpose==========\n"); + size_t shape_num; + const int64_t *shape = OH_AI_TensorGetShape(tensor, &shape_num); + auto imageBuf_nhwc = new char[size1]; + PackNCHWToNHWCFp32(imageBuf, imageBuf_nhwc, shape[0], shape[1] * shape[2], shape[3]); + memcpy(input_data, imageBuf_nhwc, size1); + printf("input data is:"); + for (int j = 0; j < element_num && j <= 20; ++j) { + printf("%f ", input_data[j]); + } + printf("\n"); + printf("==========Model Predict==========\n"); + OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model); + ASSERT_NE(inputs.handle_list, nullptr); + OH_AI_TensorHandleArray outputs; + ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + CompareResult(outputs, "ml_face_isface"); + delete[] imageBuf; + OH_AI_ModelDestroy(&model); +} + +// 异常场景:ModelGetInputByTensorName,名称不存在 +HWTEST(MSLiteTest, OHOS_Model_GetInputByTensorName_0002, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetInputs==========\n"); + OH_AI_TensorHandle tensor = OH_AI_ModelGetInputByTensorName(model, "aaa"); + ASSERT_EQ(tensor, nullptr); + OH_AI_ModelDestroy(&model); +} + +// 正常场景:ModelGetOutputByTensorName +HWTEST(MSLiteTest, OHOS_Model_GetOutputByTensorName_0001, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return 
code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetInputs==========\n"); + OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model); + ASSERT_NE(inputs.handle_list, nullptr); + FillInputsData(inputs, "ml_face_isface", true); + printf("==========Model Predict==========\n"); + OH_AI_TensorHandleArray outputs; + ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetOutput==========\n"); + OH_AI_TensorHandle tensor = OH_AI_ModelGetOutputByTensorName(model, "prob"); + ASSERT_NE(tensor, nullptr); + int64_t element_num = OH_AI_TensorGetElementNum(tensor); + printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num); + float *output_data = reinterpret_cast(OH_AI_TensorGetMutableData(tensor)); + printf("output data is:"); + for (int j = 0; j < element_num && j <= 20; ++j) { + printf("%f ", output_data[j]); + } + printf("\n"); + printf("==========compFp32WithTData==========\n"); + bool result = compFp32WithTData(output_data, "/data/test/ml_face_isface0.output", 0.01, 0.01, false); + EXPECT_EQ(result, true); + OH_AI_ModelDestroy(&model); +} + +// 异常场景:ModelGetOutputByTensorName,名称不存在 +HWTEST(MSLiteTest, OHOS_Model_GetOutputByTensorName_0002, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetInputs==========\n"); + OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model); + ASSERT_NE(inputs.handle_list, nullptr); + FillInputsData(inputs, "ml_face_isface", true); + printf("==========Model Predict==========\n"); + OH_AI_TensorHandleArray outputs; + ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetOutput==========\n"); + OH_AI_TensorHandle tensor = OH_AI_ModelGetOutputByTensorName(model, "aaa"); + ASSERT_EQ(tensor, nullptr); + OH_AI_ModelDestroy(&model); +} + +// 正常场景:MSTensorCreate,创建tensor +HWTEST(MSLiteTest, OHOS_Tensor_Create_0001, Function | MediumTest | Level1) { + printf("==========Init Context==========\n"); + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + ASSERT_NE(context, nullptr); + AddContextDeviceCPU(context); + printf("==========Create model==========\n"); + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + ASSERT_NE(model, nullptr); + printf("==========Build model==========\n"); + OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context); + printf("==========build model return code:%d\n", ret); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + printf("==========GetInputs==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, nullptr, static_cast(1 * 48 * 48 * 3 * sizeof(float))); + ASSERT_NE(tensor, nullptr); + OH_AI_TensorHandleArray inputs = 
OH_AI_ModelGetInputs(model); + inputs.handle_list[0] = tensor; + FillInputsData(inputs, "ml_face_isface", true); + printf("==========Model Predict==========\n"); + OH_AI_TensorHandleArray outputs; + ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr); + ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS); + CompareResult(outputs, "ml_face_isface"); + OH_AI_ModelDestroy(&model); +} + +// 正常场景:MSTensorDestroy,销毁tensor +HWTEST(MSLiteTest, OHOS_Tensor_Create_0002, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========OH_AI_TensorCreate==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, imageBuf, size1); + ASSERT_NE(tensor, nullptr); + delete[] imageBuf; + OH_AI_TensorDestroy(&tensor); + ASSERT_EQ(tensor, nullptr); +} + +// 正常场景:MSTensorGetName,获取tensor名称 +HWTEST(MSLiteTest, OHOS_Tensor_Create_0003, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========OH_AI_TensorCreate==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, imageBuf, size1); + ASSERT_NE(tensor, nullptr); + const char *tensor_name = OH_AI_TensorGetName(tensor); + ASSERT_EQ(strcmp(tensor_name, "data"), 0); + delete[] imageBuf; + OH_AI_TensorDestroy(&tensor); +} + +// 正常场景:MSTensorGetName,设置tensor名称 +HWTEST(MSLiteTest, OHOS_Tensor_Create_0004, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========OH_AI_TensorCreate==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, imageBuf, size1); + ASSERT_NE(tensor, nullptr); + OH_AI_TensorSetName(tensor, "new_data"); + const char *tensor_name = OH_AI_TensorGetName(tensor); + ASSERT_EQ(strcmp(tensor_name, "new_data"), 0); + delete[] imageBuf; + OH_AI_TensorDestroy(&tensor); +} + +// 正常场景:MSTensorGetDataType,获取tensor数据类型 +HWTEST(MSLiteTest, OHOS_Tensor_Create_0005, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========OH_AI_TensorCreate==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, imageBuf, size1); + ASSERT_NE(tensor, nullptr); 
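+    // The tensor was created as OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, so that is the type it should report.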
+ OH_AI_DataType data_type = OH_AI_TensorGetDataType(tensor); + ASSERT_EQ(data_type, OH_AI_DATATYPE_NUMBERTYPE_FLOAT32); + delete[] imageBuf; + OH_AI_TensorDestroy(&tensor); +} + +// 正常场景:MSTensorSetDataType,设置tensor数据类型 +HWTEST(MSLiteTest, OHOS_Tensor_Create_0006, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========OH_AI_TensorCreate==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, imageBuf, size1); + ASSERT_NE(tensor, nullptr); + OH_AI_TensorSetDataType(tensor, OH_AI_DATATYPE_NUMBERTYPE_FLOAT16); + OH_AI_DataType data_type = OH_AI_TensorGetDataType(tensor); + ASSERT_EQ(data_type, OH_AI_DATATYPE_NUMBERTYPE_FLOAT16); + delete[] imageBuf; + OH_AI_TensorDestroy(&tensor); +} + +// 正常场景:MSTensorGetShape,获取tensor维度 +HWTEST(MSLiteTest, OHOS_Tensor_Create_0007, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========OH_AI_TensorCreate==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, imageBuf, size1); + ASSERT_NE(tensor, nullptr); + size_t ret_shape_num; + const int64_t *ret_shape = OH_AI_TensorGetShape(tensor, &ret_shape_num); + ASSERT_EQ(ret_shape_num, create_shape_num); + for (size_t i = 0; i < ret_shape_num; i++) { + ASSERT_EQ(ret_shape[i], create_shape[i]); + } + delete[] imageBuf; + OH_AI_TensorDestroy(&tensor); +} + +// 正常场景:MSTensorSetShape,设置tensor维度 +HWTEST(MSLiteTest, OHOS_Tensor_Create_0008, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========OH_AI_TensorCreate==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, imageBuf, size1); + ASSERT_NE(tensor, nullptr); + size_t ret_shape_num; + const int64_t *ret_shape = OH_AI_TensorGetShape(tensor, &ret_shape_num); + ASSERT_EQ(ret_shape_num, create_shape_num); + for (size_t i = 0; i < ret_shape_num; i++) { + ASSERT_EQ(ret_shape[i], create_shape[i]); + } + constexpr size_t new_shape_num = 4; + int64_t new_shape[new_shape_num] = {1, 32, 32, 1}; + OH_AI_TensorSetShape(tensor, new_shape, new_shape_num); + size_t new_ret_shape_num; + const int64_t *new_ret_shape = OH_AI_TensorGetShape(tensor, &new_ret_shape_num); + ASSERT_EQ(new_ret_shape_num, new_shape_num); + for (size_t i = 0; i < new_ret_shape_num; i++) { + ASSERT_EQ(new_ret_shape[i], new_shape[i]); + } + delete[] imageBuf; + OH_AI_TensorDestroy(&tensor); +} + +// 正常场景:MSTensorGetFormat,获取tensor格式 +HWTEST(MSLiteTest, OHOS_Tensor_Create_0009, 
Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========OH_AI_TensorCreate==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, imageBuf, size1); + ASSERT_NE(tensor, nullptr); + OH_AI_Format data_format = OH_AI_TensorGetFormat(tensor); + ASSERT_EQ(data_format, OH_AI_FORMAT_NCHW); + delete[] imageBuf; + OH_AI_TensorDestroy(&tensor); +} + +// 正常场景:MSTensorSetFormat,设置tensor格式 +HWTEST(MSLiteTest, OHOS_Tensor_Create_0010, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========OH_AI_TensorCreate==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, imageBuf, size1); + ASSERT_NE(tensor, nullptr); + OH_AI_TensorSetFormat(tensor, OH_AI_FORMAT_NHWC); + OH_AI_Format data_format = OH_AI_TensorGetFormat(tensor); + ASSERT_EQ(data_format, OH_AI_FORMAT_NHWC); + delete[] imageBuf; + OH_AI_TensorDestroy(&tensor); +} + +// 正常场景:MSTensorGetData,获取tensor数据 +HWTEST(MSLiteTest, OHOS_Tensor_Create_0011, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========OH_AI_TensorCreate==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, imageBuf, size1); + ASSERT_NE(tensor, nullptr); + const float *ret_data = static_cast(OH_AI_TensorGetData(tensor)); + ASSERT_NE(ret_data, nullptr); + printf("return data is:"); + for (int i = 0; i < 20; ++i) { + printf("%f ", ret_data[i]); + } + printf("\n"); + delete[] imageBuf; + OH_AI_TensorDestroy(&tensor); +} + +// 正常场景:MSTensorSetData,设置tensor数据 +HWTEST(MSLiteTest, OHOS_Tensor_Create_0012, Function | MediumTest | Level1) { + printf("==========ReadFile==========\n"); + size_t size1; + size_t *ptr_size1 = &size1; + const char *imagePath = "/data/test/ml_face_isface.input"; + char *imageBuf = ReadFile(imagePath, ptr_size1); + ASSERT_NE(imageBuf, nullptr); + printf("==========OH_AI_TensorCreate==========\n"); + constexpr size_t create_shape_num = 4; + int64_t create_shape[create_shape_num] = {1, 48, 48, 3}; + OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, + create_shape_num, imageBuf, size1); + ASSERT_NE(tensor, nullptr); + constexpr size_t data_len = 6; + float data[data_len] = {1, 2, 3, 4, 5, 6}; + OH_AI_TensorSetData(tensor, data); + const float *ret_data = static_cast(OH_AI_TensorGetData(tensor)); + ASSERT_NE(ret_data, nullptr); + printf("return data is:"); + for (size_t i = 0; i < 
+        ASSERT_EQ(ret_data[i], data[i]);
+        printf("%f ", ret_data[i]);
+    }
+    printf("\n");
+    delete[] imageBuf;
+    OH_AI_TensorDestroy(&tensor);
+}
+
+// Normal scenario: MSTensorGetElementNum, get the number of tensor elements
+HWTEST(MSLiteTest, OHOS_Tensor_Create_0013, Function | MediumTest | Level1) {
+    printf("==========ReadFile==========\n");
+    size_t size1;
+    size_t *ptr_size1 = &size1;
+    const char *imagePath = "/data/test/ml_face_isface.input";
+    char *imageBuf = ReadFile(imagePath, ptr_size1);
+    ASSERT_NE(imageBuf, nullptr);
+    printf("==========OH_AI_TensorCreate==========\n");
+    constexpr size_t create_shape_num = 4;
+    int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
+    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
+                                                   create_shape_num, imageBuf, size1);
+    ASSERT_NE(tensor, nullptr);
+    int64_t element_num = OH_AI_TensorGetElementNum(tensor);
+    printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
+    ASSERT_EQ(element_num, 6912);
+    delete[] imageBuf;
+    OH_AI_TensorDestroy(&tensor);
+}
+
+// Normal scenario: MSTensorGetDataSize, get the tensor data size
+HWTEST(MSLiteTest, OHOS_Tensor_Create_0014, Function | MediumTest | Level1) {
+    printf("==========ReadFile==========\n");
+    size_t size1;
+    size_t *ptr_size1 = &size1;
+    const char *imagePath = "/data/test/ml_face_isface.input";
+    char *imageBuf = ReadFile(imagePath, ptr_size1);
+    ASSERT_NE(imageBuf, nullptr);
+    printf("==========OH_AI_TensorCreate==========\n");
+    constexpr size_t create_shape_num = 4;
+    int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
+    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
+                                                   create_shape_num, imageBuf, size1);
+    ASSERT_NE(tensor, nullptr);
+    size_t data_size = OH_AI_TensorGetDataSize(tensor);
+    printf("Tensor data size: %zu.\n", data_size);
+    ASSERT_EQ(data_size, 6912 * sizeof(float));
+    delete[] imageBuf;
+    OH_AI_TensorDestroy(&tensor);
+}
+
+// Normal scenario: MSTensorGetMutableData, get the mutable data pointer of the tensor
+HWTEST(MSLiteTest, OHOS_Tensor_Create_0015, Function | MediumTest | Level1) {
+    printf("==========ReadFile==========\n");
+    size_t size1;
+    size_t *ptr_size1 = &size1;
+    const char *imagePath = "/data/test/ml_face_isface.input";
+    char *imageBuf = ReadFile(imagePath, ptr_size1);
+    ASSERT_NE(imageBuf, nullptr);
+    printf("==========OH_AI_TensorCreate==========\n");
+    constexpr size_t create_shape_num = 4;
+    int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
+    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
+                                                   create_shape_num, imageBuf, size1);
+    ASSERT_NE(tensor, nullptr);
+    float *input_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(tensor));
+    ASSERT_NE(input_data, nullptr);
+    delete[] imageBuf;
+    OH_AI_TensorDestroy(&tensor);
+}
+
+// Normal scenario: MSTensorClone, clone the tensor
+HWTEST(MSLiteTest, OHOS_Tensor_Create_0016, Function | MediumTest | Level1) {
+    printf("==========ReadFile==========\n");
+    size_t size1;
+    size_t *ptr_size1 = &size1;
+    const char *imagePath = "/data/test/ml_face_isface.input";
+    char *imageBuf = ReadFile(imagePath, ptr_size1);
+    ASSERT_NE(imageBuf, nullptr);
+    printf("==========OH_AI_TensorCreate==========\n");
+    constexpr size_t create_shape_num = 4;
+    int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
+    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
+                                                   create_shape_num, imageBuf, size1);
+    ASSERT_NE(tensor, nullptr);
+    OH_AI_TensorHandle clone = OH_AI_TensorClone(tensor);
+    ASSERT_NE(clone, nullptr);
+    ASSERT_EQ(strcmp(OH_AI_TensorGetName(clone), "data_duplicate"), 0);
+    delete[] imageBuf;
+    OH_AI_TensorDestroy(&tensor);
+    OH_AI_TensorDestroy(&clone);
+}
+
+// Normal scenario: single-input model
+HWTEST(MSLiteTest, OHOS_Input_0001, Function | MediumTest | Level1) {
+    printf("==========Init Context==========\n");
+    OH_AI_ContextHandle context = OH_AI_ContextCreate();
+    ASSERT_NE(context, nullptr);
+    AddContextDeviceCPU(context);
+    printf("==========Create model==========\n");
+    OH_AI_ModelHandle model = OH_AI_ModelCreate();
+    ASSERT_NE(model, nullptr);
+    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
+}
+
+// Normal scenario: multi-input model
+HWTEST(MSLiteTest, OHOS_Input_0002, Function | MediumTest | Level1) {
+    printf("==========Init Context==========\n");
+    OH_AI_ContextHandle context = OH_AI_ContextCreate();
+    ASSERT_NE(context, nullptr);
+    AddContextDeviceCPU(context);
+    printf("==========Create model==========\n");
+    OH_AI_ModelHandle model = OH_AI_ModelCreate();
+    ASSERT_NE(model, nullptr);
+    ModelPredict(model, context, "ml_headpose_pb2tflite", {}, false, false, false);
+}
+
+// Normal scenario: model with uint8 input
+HWTEST(MSLiteTest, OHOS_Input_0003, Function | MediumTest | Level1) {
+    printf("==========ReadFile==========\n");
+    size_t size1;
+    size_t *ptr_size1 = &size1;
+    const char *imagePath = "/data/test/aiy_vision_classifier_plants_V1_3.input";
+    char *imageBuf = ReadFile(imagePath, ptr_size1);
+    ASSERT_NE(imageBuf, nullptr);
+    printf("==========Init Context==========\n");
+    OH_AI_ContextHandle context = OH_AI_ContextCreate();
+    ASSERT_NE(context, nullptr);
+    AddContextDeviceCPU(context);
+    printf("==========Create model==========\n");
+    OH_AI_ModelHandle model = OH_AI_ModelCreate();
+    ASSERT_NE(model, nullptr);
+    printf("==========Build model==========\n");
+    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/aiy_vision_classifier_plants_V1_3.ms",
+                                                OH_AI_MODELTYPE_MINDIR, context);
+    printf("==========build model return code:%d\n", ret);
+    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
+    printf("==========GetInputs==========\n");
+    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
+    ASSERT_NE(inputs.handle_list, nullptr);
+    for (size_t i = 0; i < inputs.handle_num; ++i) {
+        OH_AI_TensorHandle tensor = inputs.handle_list[i];
+        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
+        printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
+        void *input_data = OH_AI_TensorGetMutableData(inputs.handle_list[i]);
+        ASSERT_NE(input_data, nullptr);
+        memcpy(input_data, imageBuf, size1);
+    }
+    printf("==========Model Predict==========\n");
+    OH_AI_TensorHandleArray outputs;
+    ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
+    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
+    printf("==========GetOutput==========\n");
+    for (size_t i = 0; i < outputs.handle_num; ++i) {
+        OH_AI_TensorHandle tensor = outputs.handle_list[i];
+        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
+        printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
+        uint8_t *output_data = reinterpret_cast<uint8_t *>(OH_AI_TensorGetMutableData(tensor));
+        printf("output data is:");
+        for (int j = 0; j < element_num && j <= 20; ++j) {
+            printf("%d ", output_data[j]);
+        }
+        printf("\n");
+        printf("==========compFp32WithTData==========\n");
+        string expectedDataFile = "/data/test/aiy_vision_classifier_plants_V1_3" + std::to_string(i) + ".output";
+        bool result = compUint8WithTData(output_data, expectedDataFile, 0.01, 0.01, false);
+        EXPECT_EQ(result, true);
+    }
+    delete[] imageBuf;
+    OH_AI_ModelDestroy(&model);
+}
+
+// Normal scenario: quantized model
+HWTEST(MSLiteTest, OHOS_Input_0004, Function | MediumTest | Level1) {
+    printf("==========Init Context==========\n");
+    OH_AI_ContextHandle context = OH_AI_ContextCreate();
+    ASSERT_NE(context, nullptr);
+    AddContextDeviceCPU(context);
+    printf("==========Create model==========\n");
+    OH_AI_ModelHandle model = OH_AI_ModelCreate();
+    ASSERT_NE(model, nullptr);
+    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
+}
+
+// Normal scenario: run the inference flow repeatedly in a loop
+HWTEST(MSLiteTest, OHOS_Multiple_0001, Function | MediumTest | Level1) {
+    for (size_t num = 0; num < 50; ++num) {
+        Predict_CPU();
+    }
+}
+
+// Abnormal scenario: create the Model once, call Build multiple times
+HWTEST(MSLiteTest, OHOS_Multiple_0002, Function | MediumTest | Level1) {
+    printf("==========Init Context==========\n");
+    OH_AI_ContextHandle context = OH_AI_ContextCreate();
+    ASSERT_NE(context, nullptr);
+    AddContextDeviceCPU(context);
+    printf("==========Create model==========\n");
+    OH_AI_ModelHandle model = OH_AI_ModelCreate();
+    ASSERT_NE(model, nullptr);
+    printf("==========Build model==========\n");
+    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
+    printf("==========build model return code:%d\n", ret);
+    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
+    printf("==========Build model==========\n");
+    OH_AI_Status ret2 = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
+    printf("==========build model return code:%d\n", ret2);
+    ASSERT_EQ(ret2, OH_AI_STATUS_SUCCESS);
+    OH_AI_ModelDestroy(&model);
+}
+
+// Normal scenario: create the Model once, Build once, Predict multiple times
+HWTEST(MSLiteTest, OHOS_Multiple_0003, Function | MediumTest | Level1) {
+    printf("==========Init Context==========\n");
+    OH_AI_ContextHandle context = OH_AI_ContextCreate();
+    ASSERT_NE(context, nullptr);
+    AddContextDeviceCPU(context);
+    printf("==========Create model==========\n");
+    OH_AI_ModelHandle model = OH_AI_ModelCreate();
+    ASSERT_NE(model, nullptr);
+    printf("==========Build model==========\n");
+    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
+    printf("==========build model return code:%d\n", ret);
+    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
+    printf("==========GetInputs==========\n");
+    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
+    ASSERT_NE(inputs.handle_list, nullptr);
+    FillInputsData(inputs, "ml_face_isface", true);
+    OH_AI_TensorHandleArray outputs;
+    for (size_t i = 0; i < 50; ++i) {
+        printf("==========Model Predict==========\n");
+        OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
+        ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
+    }
+    CompareResult(outputs, "ml_face_isface");
+    OH_AI_ModelDestroy(&model);
+}
+
+// Normal scenario: create and destroy the Model multiple times
+HWTEST(MSLiteTest, OHOS_Multiple_0004, Function | MediumTest | Level1) {
+    for (size_t i = 0; i < 50; ++i) {
+        printf("==========Init Context==========\n");
+        OH_AI_ContextHandle context = OH_AI_ContextCreate();
+        ASSERT_NE(context, nullptr);
+        AddContextDeviceCPU(context);
+        printf("==========Create model==========\n");
+        OH_AI_ModelHandle model = OH_AI_ModelCreate();
+        ASSERT_NE(model, nullptr);
+        printf("==========Build model==========\n");
+        OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
+        printf("==========build model return code:%d\n", ret);
+        ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
+        printf("==========Build model==========\n");
+        OH_AI_ModelDestroy(&model);
+    }
+}
+
+// Normal scenario: two models running inference in parallel on the CPU
+HWTEST(MSLiteTest, OHOS_Parallel_0001, Function | MediumTest | Level1) {
+    std::cout << "run start" << std::endl;
+    std::thread t1(Predict_CPU);
+    std::cout << "1111111111111" << std::endl;
+    std::thread t2(Predict_CPU);
+    std::cout << "2222222222222" << std::endl;
+    t1.join();
+    t2.join();
+}
+
+// Normal scenario: a model converted with r1.3 runs inference on r1.5
+HWTEST(MSLiteTest, OHOS_Compatible_0001, Function | MediumTest | Level1) {
+    printf("==========Init Context==========\n");
+    OH_AI_ContextHandle context = OH_AI_ContextCreate();
+    ASSERT_NE(context, nullptr);
+    AddContextDeviceCPU(context);
+    printf("==========Create model==========\n");
+    OH_AI_ModelHandle model = OH_AI_ModelCreate();
+    ASSERT_NE(model, nullptr);
+    printf("==========Build model==========\n");
+    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface_r13.ms",
+                                                OH_AI_MODELTYPE_MINDIR, context);
+    printf("==========build model return code:%d\n", ret);
+    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
+    printf("==========GetInputs==========\n");
+    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
+    ASSERT_NE(inputs.handle_list, nullptr);
+    FillInputsData(inputs, "ml_face_isface", true);
+    printf("==========Model Predict==========\n");
+    OH_AI_TensorHandleArray outputs;
+    ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
+    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
+    CompareResult(outputs, "ml_face_isface");
+    OH_AI_ModelDestroy(&model);
+}
\ No newline at end of file
diff --git a/ai/mindspore/src/ohos_common.cpp b/ai/mindspore/src/ohos_common.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d0c2bd8c95f1fb06c2ac30dfab5cdbefb3e57483
--- /dev/null
+++ b/ai/mindspore/src/ohos_common.cpp
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ohos_common.h"
+#include <cinttypes>
+#include <cmath>
+#include <cstdlib>
+#include <numeric>
+
+/*
+ * getDimInfo: get dim info from a data file (int64_t)
+ * param:
+ *     fp: the testing data file object
+ * return:
+ *     dim_info: array storing the dim info of the data file, e.g.
+ *     [4,3,3,6,3,162(3*3*6*3)]: 4 is the dim size, 3,3,6,3 is the dim shape,
+ *     and the last entry is the data_size of the testing data in the file
+ * */
+void getDimInfo(FILE *fp, std::vector<uint64_t>* dim_info) {
+    const int MAX_HEAD_SIZE = 50;
+    uint32_t *dim_buffer = reinterpret_cast<uint32_t *>(malloc(MAX_HEAD_SIZE * sizeof(uint32_t)));
+    size_t ret = fread(dim_buffer, sizeof(uint32_t), MAX_HEAD_SIZE, fp);
+    if (ret == 0) {
+        free(dim_buffer);
+        return;
+    }
+    dim_info->push_back(*dim_buffer);  // get dim size
+
+    // get data shape to compute the datasize
+    uint64_t data_size = 1;
+    uint32_t i = 1;
+    for (; i <= dim_info->at(0); i++) {
+        dim_info->push_back(*(dim_buffer + i));
+        data_size *= *(dim_buffer + i);
+    }
+    dim_info->push_back(data_size);
+
+    free(dim_buffer);
+}
+
+/*
+ * readTestDataFile: read test data from a hisi .t data file (int64_t)
+ * param:
+ *     infile: the path of the hisi .t data file
+ * return:
+ *     dim_info1: array storing the dim info of the data file, e.g.
+ *     [4,3,3,6,3]: 4 is the dim size, 3,3,6,3 is the dim shape
+ * */
+void *readTestDataFile(std::string infile, std::vector<int64_t>* dim_info1) {
+    printf("\n [common.cpp] Loading data from: %s\n", infile.c_str());
+
+    FILE *fp;
+    fp = fopen(infile.c_str(), "r");
+    if (fp == nullptr) {
+        printf("ERROR: can't open file %s\n", infile.c_str());
+        return nullptr;
+    } else {
+        std::vector<uint64_t> dim_info;
+        std::vector<uint64_t>* ptr_dim_info = &dim_info;
+        getDimInfo(fp, ptr_dim_info);
+        uint64_t data_size = ptr_dim_info->at(ptr_dim_info->size() - 1);
+        fclose(fp);
+
+        fp = fopen(infile.c_str(), "r");
+        if (fp == nullptr) {
+            printf("ERROR: can't open file %s\n", infile.c_str());
+            return nullptr;
+        }
+        uint32_t *memory = reinterpret_cast<uint32_t *>(malloc((dim_info[0] + 1) * sizeof(uint32_t)));
+
+        size_t ret = fread(memory, sizeof(uint32_t), (dim_info[0] + 1), fp);
+        if (ret == 0) {
+            free(memory);
+            fclose(fp);
+            return nullptr;
+        }
+        uint32_t *data = reinterpret_cast<uint32_t *>(malloc((data_size) * sizeof(uint32_t)));
+        size_t ret2 = fread(data, sizeof(uint32_t), data_size, fp);
+        if (ret2 == 0) {
+            free(data);
+            fclose(fp);
+            return nullptr;
+        }
+        free(memory);
+        fclose(fp);
+
+        for (int i = 0; i < dim_info[0]; i++) {
+            dim_info1->push_back(dim_info[i + 1]);
+        }
+
+        printf("\n [common.cpp] Read test data file Over, get dimInfo as: (");
+        int count = dim_info1->size();
+        for (int i = 0; i < count; i++) {
+            printf("%" PRId64, dim_info1->at(i));
+        }
+        printf(")\n");
+        return data;
+    }
+}
+
+/*
+ * allclose
+ * param:
+ *     a: compared data a
+ *     b: compared data b
+ *     count: the number of elements to compare
+ *     rtol: relative tolerance
+ *     atol: absolute tolerance
+ * return:
+ *     true or false
+ * */
+bool allclose(float *a, float *b, uint64_t count, float rtol = 1e-05,
+              float atol = 1e-08, bool isquant = false) {
+    uint32_t i = 0;
+
+    // add fail loop print
+    uint32_t fail_count = 0;
+    float tol = 0;
+    float tol1 = 0;
+    float tol2 = 0;
+    bool nan_occur_in_accuray = false;
+
+    float sum = 0.0f;
+    static float sum_all;
+    static float maximum = 0;
+    static float minimum = 0;
+    static uint64_t c = 0;
+
+    if (a == nullptr || b == nullptr) {
+        return false;
+    }
+
+    for (; i < count; ++i) {
+        sum = sum + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
+        sum_all = sum_all + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
+        maximum = max(maximum, fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i])));
+        minimum = min(minimum, fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i])));
+        if (isnan(a[i]) || isinf(a[i])) {
+            fail_count = fail_count + 1;
+            nan_occur_in_accuray = true;
+            if (fail_count < 100) {
+                printf("  i = %2u:   %+f  |  %+f\n", i, a[i], b[i]);
+            }
+        } else if (fabs(a[i] - b[i]) > (atol + rtol * fabs(b[i]))) {
+            tol = tol + fabs(a[i] - b[i]) / (fabs(b[i]) + 1);
+            tol1 = tol1 + fabs(a[i] - b[i]);
+            tol2 = tol2 + fabs(a[i] - b[i]) / fabs(b[i]);
+            fail_count = fail_count + 1;
+            if (fail_count < 100) {
+                printf("  i = %2u:   %+f  |  %+f\n", i, a[i], b[i]);
+            }
+        }
+
+        if (i == count - 1) {
+            printf("  ……\n");
+            printf("\n *** Total fail_count: %u\n", fail_count);
+            printf("\n fabs(a[i] - b[i])/(fabs(b[i])+1) : %f\n", tol / fail_count);
+            printf("\n fabs(a[i] - b[i]) : %f\n", tol1 / fail_count);
+            printf("\n fabs(a[i] - b[i])/fabs(b[i]) : %f\n", tol2 / fail_count);
+
+            c = c + count;
+            printf("\n avg : %f\n", sum / count);
+            printf("\n min : %f\n", minimum);
+            printf("\n max : %f\n", maximum);
+            printf("\n avg_all : %f\n", sum_all / c);
+            printf("\n");
+            fstream file;
+            file.open("cout.csv", ios::app);
+
+            file << ","
+                 << "1,"
+                 << "0," << maximum;
+            if (fail_count == 0) {
+                file << "," << sum_all / c;
+            } else {
+                file << "," << tol / fail_count;
+            }
+            file.close();
+        }
+    }
+    if (nan_occur_in_accuray) {
+        printf("\n[common.cpp] eval output includes some NAN/INF\n");
+        return false;
+    }
+
+    if (fail_count > 0) {
+        printf("\n *** These data compare failed: atol = %f, rtol = %f\n", atol, rtol);
+        printf("\n");
+        if (isquant) {
+            if (tol / fail_count < 0.04) {
+                return true;
+            }
+        }
+        return false;
+    }
+    return true;
+}
+
+bool allclose_int8(uint8_t *a, uint8_t *b, uint64_t count, float rtol = 1e-05,
+                   float atol = 1e-08, bool isquant = false) {
+    uint32_t i = 0;
+    // add fail loop print
+    uint32_t fail_count = 0;
+    float tol = 0;
+    float tol1 = 0;
+    float tol2 = 0;
+    bool nan_occur_in_accuray = false;
+
+    float sum = 0.0f;
+    static float sum_all;
+    static float maximum = 0;
+    static float minimum = 0;
+    static uint64_t c = 0;
+    // add fail loop print
+
+    if (a == nullptr || b == nullptr) {
+        return false;
+    }
+
+    for (; i < count; ++i) {
+        sum = sum + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
+        sum_all = sum_all + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
+        maximum = max(static_cast<double>(maximum),
+                      static_cast<double>(fabs(a[i] - b[i])) / (atol + rtol * fabs(b[i])));
+        minimum = min(static_cast<double>(minimum),
+                      static_cast<double>(fabs(a[i] - b[i])) / (atol + rtol * fabs(b[i])));
+        if (isnan(a[i]) || isinf(a[i])) {
+            fail_count = fail_count + 1;
+            nan_occur_in_accuray = true;
+            if (fail_count < 100) {
+                printf("  i = %2u:   %+f  |  %+f\n", i, static_cast<float>(a[i]), static_cast<float>(b[i]));
+            }
+        } else if (fabs(a[i] - b[i]) > 0) {
+            tol = tol + fabs(a[i] - b[i]) / (fabs(b[i]) + 1);
+            tol1 = tol1 + fabs(a[i] - b[i]);
+            tol2 = tol2 + fabs(a[i] - b[i]) / fabs(b[i]);
+            fail_count = fail_count + 1;
+            printf("%2d", static_cast<int>(fabs(a[i] - b[i])));
+            printf("  i = %2u:   %2d  |  %2d\n", i, a[i], b[i]);
+        }
+        if (i == count - 1) {
+            printf("  ……\n");
+            printf("\n *** Total fail_count: %u\n", fail_count);
+            printf("\n fabs(a[i] - b[i])/(fabs(b[i])+1) : %f\n", tol / fail_count);
+            printf("\n fabs(a[i] - b[i]) : %f\n", tol1 / fail_count);
+            printf("\n fabs(a[i] - b[i])/fabs(b[i]) : %f\n", tol2 / fail_count);
+
+            c = c + count;
+            printf("\n avg : %f\n", sum / count);
+            printf("\n min : %f\n", minimum);
+
+            printf("\n max : %f\n", maximum);
+            printf("\n avg_all : %f\n", sum_all / c);
+            printf("\n");
+            fstream file;
+            file.open("cout.csv", ios::app);
+
+            file << ","
+                 << "1,"
+                 << "0," << maximum;
+            if (fail_count == 0) {
+                file << "," << sum_all / c;
+            } else {
+                file << "," << tol / fail_count;
+            }
+            file.close();
+        }
+    }
+    if (nan_occur_in_accuray) {
+        printf("\n[common.cpp] eval output includes some NAN/INF\n");
+        return false;
+    }
+    if (fail_count > 0) {
+        printf("\n *** These data compare failed: atol = %f, rtol = %f\n", atol, rtol);
+        printf("\n");
+        if (isquant) {
+            if (tol / fail_count < 0.04) {
+                return true;
+            }
+        }
+        return false;
+    }
+    return true;
+}
+
+/*
+ * compFp32WithTData: compare the data with the data in a hisi .t file
+ * param:
+ *     actualOutputData: the result of ge
+ *     expectedDataFile: the path of the hisi .t result file
+ *     rtol: relative tolerance
+ *     atol: absolute tolerance
+ * return:
+ *     true or false
+ * */
+bool compFp32WithTData(float *actualOutputData, const std::string& expectedDataFile,
+                       float rtol = 1e-05, float atol = 1e-08, bool isquant = false) {
+    std::vector<int64_t> dim_info;
+    std::vector<int64_t>* ptr_dim_info = &dim_info;
+    float *expectedOutputData =
+        reinterpret_cast<float *>(readTestDataFile(expectedDataFile, ptr_dim_info));
+    uint32_t i = 0;
+    uint64_t data_size = 1;
+    data_size = accumulate(dim_info.begin(), dim_info.end(), 1, std::multiplies<uint64_t>());
+
+    // print caffe/tf output:
+    printf("[common.cpp] expected output data:");
+    for (; i < data_size && i < 10; i++) {
+        printf("%4f ", expectedOutputData[i]);
+    }
+    printf("\n");
+    if (isquant) {
+        return allclose(actualOutputData, expectedOutputData, data_size, rtol, atol, true);
+    }
+    return allclose(actualOutputData, expectedOutputData, data_size, rtol, atol);
+}
+
+bool compUint8WithTData(uint8_t *actualOutputData, const std::string& expectedDataFile,
+                        float rtol = 1e-05, float atol = 1e-08, bool isquant = false) {
+    std::vector<int64_t> dim_info;
+    std::vector<int64_t>* ptr_dim_info = &dim_info;
+    auto dataFile = readTestDataFile(expectedDataFile, ptr_dim_info);
+    if (dataFile == nullptr) {
+        return false;
+    }
+    uint8_t *expectedOutputData = reinterpret_cast<uint8_t *>(dataFile);
+    uint32_t i = 0;
+    uint64_t data_size = 1;
+    data_size = accumulate(dim_info.begin(), dim_info.end(), 1, std::multiplies<uint64_t>());
+
+    // print caffe/tf output:
+    printf("\n [common.cpp] expected output data:\n");
+    for (; i < data_size && i < 10; i++) {
+        printf("%4hhu ", static_cast<uint8_t>(expectedOutputData[i]));
+    }
+    printf("\n");
+    if (isquant) {
+        return allclose_int8(actualOutputData, expectedOutputData, data_size, rtol, atol, true);
+    }
+    return allclose_int8(actualOutputData, expectedOutputData, data_size, rtol, atol);
+}
+
+/*
+ * ReadFile: read a model file
+ * param:
+ *     file: file location
+ *     size: file size
+ * return:
+ *     buf of the file
+ * */
+char *ReadFile(const char *file, size_t* size) {
+    printf("[common.cpp] Loading data from: %s\n", file);
+
+    std::ifstream ifs(file);
+    if (!ifs.good()) {
+        return nullptr;
+    }
+
+    if (!ifs.is_open()) {
+        ifs.close();
+        return nullptr;
+    }
+
+    ifs.seekg(0, std::ios::end);
+    *size = ifs.tellg();
+
+    char *buf = new char[*size];
+    if (buf == nullptr) {
+        ifs.close();
+        return nullptr;
+    }
+
+    ifs.seekg(0, std::ios::beg);
+    ifs.read(buf, *size);
+    ifs.close();
+    printf("[common.cpp]Read Binary Data Over, get tensorSize as: %" PRId64 ".\n", static_cast<int64_t>(*size));
+
+    return buf;
+}
+
+void PackNCHWToNHWCFp32(const char *src, char *dst, int batch, int plane, int channel) {
+    for (int n = 0; n < batch; n++) {
+        for (int c = 0; c < channel; c++) {
+            for (int hw = 0; hw < plane; hw++) {
+                int nhwc_index = n * channel * plane + hw * channel + c;
+                int nchw_index = n * channel * plane + c * plane + hw;
+                dst[nhwc_index * 4] = src[nchw_index * 4];
+                dst[nhwc_index * 4 + 1] = src[nchw_index * 4 + 1];
+                dst[nhwc_index * 4 + 2] = src[nchw_index * 4 + 2];
+                dst[nhwc_index * 4 + 3] = src[nchw_index * 4 + 3];
+            }
+        }
+    }
+    return;
+}
diff --git a/ai/mindspore/src/ohos_common.h b/ai/mindspore/src/ohos_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..6bd28eac762f183194b6e569f82d0a850fe69224
--- /dev/null
+++ b/ai/mindspore/src/ohos_common.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GE_COMMON_H
+#define GE_COMMON_H
+
+#include <cstdint>
+#include <cstdio>
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <map>
+#include <sstream>
+#include <string>
+#include <vector>
+
+using std::string;
+using std::cout;
+using std::endl;
+using std::map;
+using std::ios;
+using std::fstream;
+using std::to_string;
+using std::ifstream;
+using std::stringstream;
+using std::max;
+using std::min;
+using std::vector;
+// using namespace mindspore::tensor;
+
+bool compFp32WithTData(float *actualOutputData,
+                       const std::string &expectedDataFile, float rtol,
+                       float atol, bool isquant);
+bool allclose_int8(uint8_t *a, uint8_t *b, uint64_t count, float rtol, float atol,
+                   bool isquant);
+bool compUint8WithTData(uint8_t *actualOutputData,
+                        const std::string &expectedDataFile, float rtol,
+                        float atol, bool isquant);
+//// add for mslite test of int64:
+void getDimInfo(FILE *fp, std::vector<uint64_t>* dim_info);
+char *ReadFile(const char *file, size_t* size);
+void PackNCHWToNHWCFp32(const char *src, char *dst, int batch, int plane, int channel);
+
+#endif  // GE_COMMON_H
\ No newline at end of file
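Note on the expected-data files: the *.input/*.output resources consumed by readTestDataFile/getDimInfo above use a flat binary layout of a uint32 dim count, followed by that many uint32 dims, followed by the payload whose element count is the product of the dims. The sketch below is only an illustration of that layout as inferred from the reader code; the file name, shape, and values are invented and a little-endian target is assumed.

    // Hypothetical generator for a file in the layout read by readTestDataFile():
    // [uint32 dim_count][uint32 dims...][payload]
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        const std::vector<uint32_t> dims = {1, 2};      // example shape: 1 x 2
        const std::vector<float> payload = {0.1f, 0.9f}; // example expected values
        FILE *fp = fopen("example0.output", "wb");       // hypothetical file name
        if (fp == nullptr) {
            return 1;
        }
        uint32_t dim_count = dims.size();
        fwrite(&dim_count, sizeof(uint32_t), 1, fp);              // header: number of dims
        fwrite(dims.data(), sizeof(uint32_t), dims.size(), fp);   // header: each dim
        fwrite(payload.data(), sizeof(float), payload.size(), fp); // data section
        fclose(fp);
        return 0;
    }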