提交 83d33c99 编写于 作者: T TonyWang222

add ms xts tests

Change-Id: Ia6db5a7f6b37a14331ecc71a1eaa8e2f3565adff
Signed-off-by: NTonyWang222 <wangtongyu6@huawei.com>
上级 1969b9e0
...@@ -15,6 +15,6 @@ group("ai") { ...@@ -15,6 +15,6 @@ group("ai") {
testonly = true testonly = true
deps = [ "neural_network_runtime:neural_network_runtime" ] deps = [ "neural_network_runtime:neural_network_runtime" ]
if (is_standard_system) { if (is_standard_system) {
deps += ["mindspore:ActsMindSporeTest"] deps += [ "mindspore:ActsMindSporeTest" ]
} }
} }
...@@ -13,14 +13,13 @@ ...@@ -13,14 +13,13 @@
import("//test/xts/tools/build/suite.gni") import("//test/xts/tools/build/suite.gni")
module_output_path = "hits/ActsMindSporeTest" module_output_path = "acts/ActsMindSporeTest"
ohos_moduletest_suite("ActsMindSporeTest") { ohos_moduletest_suite("ActsMindSporeTest") {
module_out_path = module_output_path module_out_path = module_output_path
sources = [ sources = [
"ndk_test/context_test.cc", "src/ohos_c_api_test_mslite.cpp",
"ndk_test/model_test.cc", "src/ohos_common.cpp",
"ndk_test/tensor_test.cc",
] ]
include_dirs = [ "//foundation/ai/mindspore/" ] include_dirs = [ "//foundation/ai/mindspore/" ]
...@@ -30,4 +29,8 @@ ohos_moduletest_suite("ActsMindSporeTest") { ...@@ -30,4 +29,8 @@ ohos_moduletest_suite("ActsMindSporeTest") {
"//third_party/googletest:gtest", "//third_party/googletest:gtest",
"//third_party/mindspore/mindspore/lite:mindspore_lib", "//third_party/mindspore/mindspore/lite:mindspore_lib",
] ]
defines = [ "__STDC_FORMAT_MACROS" ]
subsystem_name = "ai"
part_name = "mindspore"
} }
...@@ -8,21 +8,37 @@ ...@@ -8,21 +8,37 @@
"type": "CppTest" "type": "CppTest"
}, },
"kits": [ "kits": [
{
"post-push" : [
"chmod -R 777 /data/local/tmp/*"
],
"push": [
"ActsMindSporeTest->/data/local/tmp/ActsMindSporeTest"
],
"type": "PushKit"
},
{ {
"type": "ShellKit", "type": "ShellKit",
"run-command": [ "run-command": [
"remount", "remount",
"mkdir /data/test" "mkdir /data/test"
] ]
},
{
"type": "PushKit",
"push": [
"ActsMindSporeTest->/data/local/tmp/ActsMindSporeTest",
"resources/ai/mindspore/aiy_vision_classifier_plants_V1_3/aiy_vision_classifier_plants_V1_3.ms -> /data/test",
"resources/ai/mindspore/aiy_vision_classifier_plants_V1_3/aiy_vision_classifier_plants_V1_3.input -> /data/test",
"resources/ai/mindspore/aiy_vision_classifier_plants_V1_3/aiy_vision_classifier_plants_V1_30.output -> /data/test",
"resources/ai/mindspore/ml_face_is_face/ml_face_isface.ms -> /data/test",
"resources/ai/mindspore/ml_face_is_face/ml_face_isfacer13.ms -> /data/test",
"resources/ai/mindspore/ml_face_is_face/ml_face_isface.input -> /data/test",
"resources/ai/mindspore/ml_face_is_face/ml_face_isface_0.input -> /data/test",
"resources/ai/mindspore/ml_face_is_face/ml_face_isface0.output -> /data/test",
"resources/ai/mindspore/ml_Hand_deploy/ml_Hand_deploy.ms -> /data/test",
"resources/ai/mindspore/ml_Hand_deploy/ml_Hand_deploy_0.input -> /data/test",
"resources/ai/mindspore/ml_Hand_deploy/ml_Hand_deploy0.output -> /data/test",
"resources/ai/mindspore/ml_ocr_cn/ml_ocr_cn_0.input -> /data/test",
"resources/ai/mindspore/ml_ocr_cn/ml_ocr_cn.ms -> /data/test",
"resources/ai/mindspore/ml_ocr_cn/ml_ocr_cn0.output -> /data/test",
"resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite.ms -> /data/test",
"resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite_0.input -> /data/test",
"resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite_1.input -> /data/test",
"resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite_2.input -> /data/test",
"resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite0.output -> /data/test"
]
} }
] ]
} }
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/c_api/context_c.h"
#include "gtest/gtest.h"
using namespace testing::ext;
class ContextTest: public testing::Test {
protected:
static void SetUpTestCase(void) {}
static void TearDownTestCase(void) {}
virtual void SetUp() {}
virtual void TearDown() {}
};
// Exercises the context C API end to end: device-info getters/setters (type,
// provider, provider device, frequency), context thread options, affinity
// core-list round trip, the parallel flag, and device registration.
HWTEST(ContextTest, TestCase_0001, Function | MediumTest | Level1) {
    OH_AI_DeviceInfoHandle npu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_KIRIN_NPU);
    EXPECT_TRUE(npu_device_info != nullptr);
    EXPECT_EQ(OH_AI_DeviceInfoGetDeviceType(npu_device_info), OH_AI_DEVICETYPE_KIRIN_NPU);
    // Setter/getter round trips on the NPU device descriptor.
    OH_AI_DeviceInfoSetProvider(npu_device_info, "vendor name");
    EXPECT_STREQ(OH_AI_DeviceInfoGetProvider(npu_device_info), "vendor name");
    OH_AI_DeviceInfoSetProviderDevice(npu_device_info, "npu_a");
    EXPECT_STREQ(OH_AI_DeviceInfoGetProviderDevice(npu_device_info), "npu_a");
    OH_AI_DeviceInfoSetFrequency(npu_device_info, 3);
    EXPECT_EQ(OH_AI_DeviceInfoGetFrequency(npu_device_info), 3);
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    EXPECT_TRUE(context != nullptr);
    OH_AI_ContextSetThreadNum(context, 4);
    EXPECT_EQ(OH_AI_ContextGetThreadNum(context), 4);
    OH_AI_ContextSetThreadAffinityMode(context, 2);
    EXPECT_EQ(OH_AI_ContextGetThreadAffinityMode(context), 2);
    constexpr size_t core_num = 4;
    int32_t core_list[core_num] = {1, 3, 2, 0};
    OH_AI_ContextSetThreadAffinityCoreList(context, core_list, core_num);
    size_t ret_core_num;
    const int32_t *ret_core_list = nullptr;
    ret_core_list = OH_AI_ContextGetThreadAffinityCoreList(context, &ret_core_num);
    EXPECT_EQ(ret_core_num, core_num);
    // The list read back must match what was set, element by element.
    for (size_t i = 0; i < ret_core_num; i++) {
        EXPECT_EQ(ret_core_list[i], core_list[i]);
    }
    OH_AI_ContextSetEnableParallel(context, true);
    EXPECT_EQ(OH_AI_ContextGetEnableParallel(context), true);
    // Create-then-destroy covers OH_AI_DeviceInfoDestroy; a fresh handle is
    // created afterwards for the FP16 checks below.
    OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    OH_AI_DeviceInfoDestroy(&cpu_device_info);
    cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, true);
    EXPECT_EQ(OH_AI_DeviceInfoGetEnableFP16(cpu_device_info), true);
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
    OH_AI_ContextAddDeviceInfo(context, npu_device_info);
    OH_AI_ContextDestroy(&context);
}
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <fstream>
#include "include/c_api/model_c.h"
#include "gtest/gtest.h"
using namespace testing::ext;
// Path of the model pushed onto the device by the test harness.
const char* MODEL_FILE_PATH = "/data/test/mobilenetv2.ms";
// Profiling accumulators keyed by op type / op name: {call count, total cost in ms}.
std::map<std::string, std::pair<int, float>> g_op_times_by_type_;
std::map<std::string, std::pair<int, float>> g_op_times_by_name_;
#ifdef __cplusplus
extern "C" {
#endif
uint64_t g_op_begin_ = 0;        // timestamp (us) captured before the current op runs
int g_op_call_times_total_ = 0;  // total number of op invocations observed
float g_op_cost_total_ = 0.0f;   // accumulated op cost in milliseconds
const int USEC = 1000000;        // microseconds per second
const int MSEC = 1000;           // nanoseconds per microsecond (divisor)
// Returns a monotonic timestamp in microseconds, or 0 if the clock query fails.
// The seconds field is widened to uint64_t *before* the multiplication so the
// arithmetic cannot overflow on platforms where time_t is 32 bits (the old
// code multiplied in time_t/int and only then cast the result).
uint64_t GetTimeUs() {
    constexpr uint64_t kUsecPerSec = 1000000;  // microseconds per second
    constexpr uint64_t kNsecPerUsec = 1000;    // nanoseconds per microsecond
    struct timespec ts = {0, 0};
    if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
        return 0;
    }
    return static_cast<uint64_t>(ts.tv_sec) * kUsecPerSec +
           static_cast<uint64_t>(ts.tv_nsec) / kNsecPerUsec;
}
// Profiling callback run before every kernel: registers the op in the
// per-type and per-name tables on first sight, bumps the global call counter
// and records the start timestamp consumed by TimeAfterCallback.
// Always returns true so inference continues.
bool TimeBeforeCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
                        const OH_AI_CallBackParam kernel_Info) {
    // try_emplace inserts {0, 0.0f} only when the key is absent — one lookup
    // instead of the previous find-then-insert double lookup.
    g_op_times_by_type_.try_emplace(kernel_Info.node_type, 0, 0.0f);
    g_op_times_by_name_.try_emplace(kernel_Info.node_name, 0, 0.0f);
    g_op_call_times_total_++;
    g_op_begin_ = GetTimeUs();
    return true;
}
// Profiling callback run after every kernel: converts the elapsed time since
// TimeBeforeCallback's timestamp to milliseconds and folds it into the global
// and per-type/per-name accumulators. Always returns true.
bool TimeAfterCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
                       const OH_AI_CallBackParam kernel_Info) {
    uint64_t opEnd = GetTimeUs();
    constexpr float kFloatMSEC = 1000.0f;  // microseconds per millisecond
    float cost = static_cast<float>(opEnd - g_op_begin_) / kFloatMSEC;
    g_op_cost_total_ += cost;
    // Take each mapped value once by reference — the old code performed a
    // separate map lookup for every field update.
    auto &by_type = g_op_times_by_type_[kernel_Info.node_type];
    by_type.first++;
    by_type.second += cost;
    auto &by_name = g_op_times_by_name_[kernel_Info.node_name];
    by_name.first++;
    by_name.second += cost;
    return true;
}
#ifdef __cplusplus
}
#endif
class ModelTest: public testing::Test {
protected:
static void SetUpTestCase(void) {}
static void TearDownTestCase(void) {}
virtual void SetUp() {}
virtual void TearDown() {}
};
// Fills every input tensor with pseudo-random floats in [0.0, 0.9].
// Returns OH_AI_STATUS_SUCCESS, or OH_AI_STATUS_LITE_ERROR when a tensor's
// mutable buffer cannot be obtained. Uses named casts and nullptr instead of
// the original C-style casts / NULL.
int GenerateInputDataWithRandom(OH_AI_TensorHandleArray inputs) {
    for (size_t i = 0; i < inputs.handle_num; ++i) {
        float *input_data = static_cast<float *>(OH_AI_TensorGetMutableData(inputs.handle_list[i]));
        if (input_data == nullptr) {
            printf("OH_AI_TensorGetMutableData failed.\n");
            return OH_AI_STATUS_LITE_ERROR;
        }
        int64_t num = OH_AI_TensorGetElementNum(inputs.handle_list[i]);
        const int divisor = 10;
        for (int64_t j = 0; j < num; j++) {
            input_data[j] = static_cast<float>(rand() % divisor) / divisor;  // 0--0.9f
        }
    }
    return OH_AI_STATUS_SUCCESS;
}
// Reads a whole file into a heap buffer the caller owns (release with
// delete[]). On success *size receives the file length; returns nullptr when
// the file cannot be opened, its size cannot be determined, or the read is
// short. The old code fed a failed tellg() (-1) straight into size_t —
// yielding a huge allocation request — and used a gtest macro in a plain
// helper even though std::make_unique throws rather than returning null.
char *ReadFile(const char *file, size_t *size) {
    std::fstream ifs;
    ifs.open(file, std::ifstream::in | std::ifstream::binary);
    if (!ifs.good() || !ifs.is_open()) {
        return nullptr;
    }
    ifs.seekg(0, std::ios::end);
    std::streamoff len = ifs.tellg();
    if (len < 0) {  // tellg() failed
        return nullptr;
    }
    *size = static_cast<size_t>(len);
    auto buf = std::make_unique<char[]>(*size);
    ifs.seekg(0, std::ios::beg);
    ifs.read(buf.get(), *size);
    if (ifs.gcount() != len) {  // short read: do not hand back partial data
        return nullptr;
    }
    return buf.release();
}
// Builds a model from an in-memory buffer and exercises the workspace API.
// Fix: the buffer returned by ReadFile was leaked; it is now released after
// the model has been destroyed (safe regardless of whether the build keeps a
// reference to it).
HWTEST(ModelTest, TestCase_0001_build, Function | MediumTest | Level1) {
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    if (context == nullptr) {
        printf("OH_AI_ContextCreate failed.\n");
        EXPECT_NE(context, nullptr);
    }
    OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    if (cpu_device_info == nullptr) {
        printf("OH_AI_DeviceInfoCreate failed.\n");
        OH_AI_ContextDestroy(&context);
        EXPECT_NE(cpu_device_info, nullptr);
    }
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    if (model == nullptr) {
        printf("OH_AI_ModelCreate failed.\n");
        OH_AI_ContextDestroy(&context);
        EXPECT_NE(model, nullptr);
    }
    size_t size = 0;
    auto buff = ReadFile(MODEL_FILE_PATH, &size);
    EXPECT_NE(buff, nullptr);
    int ret = OH_AI_ModelBuild(model, buff, size, OH_AI_MODELTYPE_MINDIR, context);
    EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
    OH_AI_ModelSetWorkspace(model, nullptr, 0);
    OH_AI_ModelDestroy(&model);
    delete[] buff;  // fix: ReadFile buffer was leaked
}
// Builds a model from file, resizes its inputs, fills them with random data,
// predicts with the profiling callbacks and dumps input/output tensor values.
// Fix: both print loops indexed the data with the outer tensor index `i`
// instead of the element index `j`, printing one element repeatedly (and
// reading out of bounds when i exceeded the element count).
HWTEST(ModelTest, TestCase_0002_predict, Function | MediumTest | Level1) {
    // Create and init context, add CPU device info
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    if (context == nullptr) {
        printf("OH_AI_ContextCreate failed.\n");
        EXPECT_NE(context, nullptr);
    }
    const int thread_num = 2;
    OH_AI_ContextSetThreadNum(context, thread_num);
    OH_AI_ContextSetThreadAffinityMode(context, 1);
    OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    if (cpu_device_info == nullptr) {
        printf("OH_AI_DeviceInfoCreate failed.\n");
        OH_AI_ContextDestroy(&context);
        EXPECT_NE(cpu_device_info, nullptr);
    }
    OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, false);
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
    // Create model
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    if (model == nullptr) {
        printf("OH_AI_ModelCreate failed.\n");
        OH_AI_ContextDestroy(&context);
        EXPECT_NE(model, nullptr);
    }
    // Build model
    int ret = OH_AI_ModelBuildFromFile(model, MODEL_FILE_PATH, OH_AI_MODELTYPE_MINDIR, context);
    if (ret != OH_AI_STATUS_SUCCESS) {
        printf("OH_AI_ModelBuildFromFile failed, ret: %d.\n", ret);
        OH_AI_ModelDestroy(&model);
        EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
    }
    // Get Inputs
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    if (inputs.handle_list == nullptr) {
        printf("OH_AI_ModelGetInputs failed, ret: %d.\n", ret);
        OH_AI_ModelDestroy(&model);
        EXPECT_NE(inputs.handle_list, nullptr);
    }
    // Resize to a fixed NHWC shape before filling the inputs.
    std::vector<OH_AI_ShapeInfo> shape_infos = {{4, {1, 112, 112, 3}}};
    ret = OH_AI_ModelResize(model, inputs, shape_infos.data(), shape_infos.size());
    EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
    // Generate random data as input data.
    ret = GenerateInputDataWithRandom(inputs);
    if (ret != OH_AI_STATUS_SUCCESS) {
        printf("GenerateInputDataWithRandom failed, ret: %d.\n", ret);
        OH_AI_ModelDestroy(&model);
        EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
    }
    // Model Predict
    OH_AI_TensorHandleArray outputs = OH_AI_ModelGetOutputs(model);
    ret = OH_AI_ModelPredict(model, inputs, &outputs, TimeBeforeCallback, TimeAfterCallback);
    if (ret != OH_AI_STATUS_SUCCESS) {
        printf("OH_AI_ModelPredict failed, ret: %d.\n", ret);
        OH_AI_ModelDestroy(&model);
        EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
    }
    // Print Input and Output Tensor Data.
    for (size_t i = 0; i < inputs.handle_num; ++i) {
        OH_AI_TensorHandle tensor = inputs.handle_list[i];
        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
        printf("Tensor name: %s.\n", OH_AI_TensorGetName(tensor));
        const float *data = static_cast<const float *>(
            OH_AI_TensorGetData(OH_AI_ModelGetInputByTensorName(model, OH_AI_TensorGetName(tensor))));
        printf("input data is:\n");
        const int max_print_num = 10;
        for (int j = 0; j < element_num && j <= max_print_num; ++j) {
            printf("%f ", data[j]);  // fix: was data[i]
        }
        printf("\n");
    }
    for (size_t i = 0; i < outputs.handle_num; ++i) {
        OH_AI_TensorHandle tensor = outputs.handle_list[i];
        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
        printf("Tensor name: %s.\n", OH_AI_TensorGetName(tensor));
        const float *data = static_cast<const float *>(
            OH_AI_TensorGetData(OH_AI_ModelGetOutputByTensorName(model, OH_AI_TensorGetName(tensor))));
        printf("output data is:\n");
        const int max_print_num = 10;
        for (int j = 0; j < element_num && j <= max_print_num; ++j) {
            printf("%f ", data[j]);  // fix: was data[i]
        }
        printf("\n");
    }
    // Delete model.
    OH_AI_ModelDestroy(&model);
    EXPECT_EQ(model, nullptr);
}
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thread>
#include <inttypes.h>
#include "ohos_common.h"
#include "gtest/gtest.h"
#include "include/c_api/context_c.h"
#include "include/c_api/model_c.h"
#include "include/c_api/types_c.h"
#include "include/c_api/status_c.h"
#include "include/c_api/data_type_c.h"
#include "include/c_api/tensor_c.h"
#include "include/c_api/format_c.h"
using namespace testing::ext;
class MSLiteTest: public testing::Test {
protected:
static void SetUpTestCase(void) {}
static void TearDownTestCase(void) {}
virtual void SetUp() {}
virtual void TearDown() {}
};
// function before callback
// Kernel callback invoked before each node executes; logs the node name and
// type, returning true so inference proceeds.
bool PrintBeforeCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
                         const OH_AI_CallBackParam kernel_Info) {
    std::cout << "Before forwarding " << kernel_Info.node_name << " " << kernel_Info.node_type << std::endl;
    return true;
}
// function after callback
// Kernel callback invoked after each node executes; logs the node name and
// type, returning true so inference proceeds.
bool PrintAfterCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
                        const OH_AI_CallBackParam kernel_Info) {
    std::cout << "After forwarding " << kernel_Info.node_name << " " << kernel_Info.node_type << std::endl;
    return true;
}
// add cpu device info
// Creates a CPU device descriptor, sanity-checks its reported type, and
// attaches it to the given context. NOTE(review): the context appears to keep
// the handle after AddDeviceInfo — confirm ownership against the C API docs.
void AddContextDeviceCPU(OH_AI_ContextHandle context) {
    OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    ASSERT_NE(cpu_device_info, nullptr);
    OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(cpu_device_info);
    printf("==========device_type:%d\n", device_type);
    ASSERT_EQ(device_type, OH_AI_DEVICETYPE_CPU);
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
}
// add gpu device info
// Creates a GPU device descriptor, sanity-checks its reported type, and
// attaches it to the given context (mirrors AddContextDeviceCPU).
void AddContextDeviceGPU(OH_AI_ContextHandle context) {
    OH_AI_DeviceInfoHandle gpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_GPU);
    ASSERT_NE(gpu_device_info, nullptr);
    OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(gpu_device_info);
    printf("==========device_type:%d\n", device_type);
    ASSERT_EQ(device_type, OH_AI_DEVICETYPE_GPU);
    OH_AI_ContextAddDeviceInfo(context, gpu_device_info);
}
// fill data to inputs tensor
// Reads "/data/test/<model_name>_<i>.input" for every input tensor and copies
// it into the tensor's buffer, optionally transposing NCHW -> NHWC first.
// Fix: the transposed scratch buffer (imageBuf_nhwc) was allocated with new[]
// but never released — leaked once per transposed input.
void FillInputsData(OH_AI_TensorHandleArray inputs, string model_name, bool is_transpose) {
    for (size_t i = 0; i < inputs.handle_num; ++i) {
        printf("==========ReadFile==========\n");
        size_t size1;
        size_t *ptr_size1 = &size1;
        string input_data_path = "/data/test/" + model_name + "_" + std::to_string(i) + ".input";
        const char *imagePath = input_data_path.c_str();
        char *imageBuf = ReadFile(imagePath, ptr_size1);
        ASSERT_NE(imageBuf, nullptr);
        OH_AI_TensorHandle tensor = inputs.handle_list[i];
        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
        printf("Tensor name: %s. \n", OH_AI_TensorGetName(tensor));
        float *input_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(inputs.handle_list[i]));
        ASSERT_NE(input_data, nullptr);
        if (is_transpose) {
            printf("==========Transpose==========\n");
            size_t shape_num;
            const int64_t *shape = OH_AI_TensorGetShape(tensor, &shape_num);
            auto imageBuf_nhwc = new char[size1];
            PackNCHWToNHWCFp32(imageBuf, imageBuf_nhwc, shape[0], shape[1] * shape[2], shape[3]);
            memcpy(input_data, imageBuf_nhwc, size1);
            delete[] imageBuf_nhwc;  // fix: scratch buffer was leaked
        } else {
            memcpy(input_data, imageBuf, size1);
        }
        printf("input data after filling is: ");
        for (int j = 0; j < element_num && j <= 20; ++j) {
            printf("%f ", input_data[j]);
        }
        printf("\n");
        delete[] imageBuf;
    }
}
// compare result after predict
// Prints the first elements of every output tensor, then compares each tensor
// against the golden file "/data/test/<model_name><i>.output" with relative
// and absolute tolerance 0.01 via compFp32WithTData.
void CompareResult(OH_AI_TensorHandleArray outputs, string model_name) {
    printf("==========GetOutput==========\n");
    for (size_t i = 0; i < outputs.handle_num; ++i) {
        OH_AI_TensorHandle tensor = outputs.handle_list[i];
        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
        printf("Tensor name: %s .\n", OH_AI_TensorGetName(tensor));
        float *output_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(tensor));
        printf("output data is:");
        // Dump at most the first 21 values for debugging.
        for (int j = 0; j < element_num && j <= 20; ++j) {
            printf("%f ", output_data[j]);
        }
        printf("\n");
        printf("==========compFp32WithTData==========\n");
        string output_file = "/data/test/" + model_name + std::to_string(i) + ".output";
        bool result = compFp32WithTData(output_data, output_file, 0.01, 0.01, false);
        EXPECT_EQ(result, true);
    }
}
// model build and predict
// Builds the model either from an in-memory buffer (build_by_graph) or from
// file, optionally resizes the inputs to shape_infos, fills the input tensors
// from the golden .input files (optionally transposing NCHW->NHWC), runs
// prediction (optionally with the print callbacks), compares outputs against
// the golden .output files and destroys the model.
void ModelPredict(OH_AI_ModelHandle model, OH_AI_ContextHandle context, string model_name,
                  OH_AI_ShapeInfo shape_infos, bool build_by_graph, bool is_transpose, bool is_callback) {
    string model_path = "/data/test/" + model_name + ".ms";
    const char *graphPath = model_path.c_str();
    OH_AI_Status ret = OH_AI_STATUS_SUCCESS;
    if (build_by_graph) {
        printf("==========Build model by graphBuf==========\n");
        size_t size;
        size_t *ptr_size = &size;
        // NOTE(review): graphBuf is never freed — whether it may be released
        // after OH_AI_ModelBuild depends on the API's buffer-ownership
        // contract; confirm against the OH_AI docs before adding delete[].
        char *graphBuf = ReadFile(graphPath, ptr_size);
        ASSERT_NE(graphBuf, nullptr);
        ret = OH_AI_ModelBuild(model, graphBuf, size, OH_AI_MODELTYPE_MINDIR, context);
    } else {
        printf("==========Build model==========\n");
        ret = OH_AI_ModelBuildFromFile(model, graphPath, OH_AI_MODELTYPE_MINDIR, context);
    }
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    // shape_num is a size_t count, not a pointer: compare against 0, not NULL.
    if (shape_infos.shape_num != 0) {
        printf("==========Resizes==========\n");
        OH_AI_Status resize_ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num);
        printf("==========Resizes return code:%d\n", resize_ret);
        ASSERT_EQ(resize_ret, OH_AI_STATUS_SUCCESS);
    }
    FillInputsData(inputs, model_name, is_transpose);
    OH_AI_TensorHandleArray outputs;
    OH_AI_Status predict_ret = OH_AI_STATUS_SUCCESS;
    if (is_callback) {
        printf("==========Model Predict Callback==========\n");
        OH_AI_KernelCallBack before_call_back = PrintBeforeCallback;
        OH_AI_KernelCallBack after_call_back = PrintAfterCallback;
        predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, before_call_back, after_call_back);
    } else {
        printf("==========Model Predict==========\n");
        predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    }
    ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
    CompareResult(outputs, model_name);
    OH_AI_ModelDestroy(&model);
}
// predict on cpu
// Convenience wrapper: runs ml_face_isface on a default CPU context.
// NOTE(review): not referenced by any test case visible in this chunk.
void Predict_CPU() {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context with default settings (no thread count or core
// affinity configured); runs ml_face_isface end to end.
HWTEST(MSLiteTest, OHOS_Context_CPU_0001, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context with 4 threads.
HWTEST(MSLiteTest, OHOS_Context_CPU_0002, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 4);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 4);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context with 2 threads.
HWTEST(MSLiteTest, OHOS_Context_CPU_0003, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 2);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 2);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context with 1 thread.
HWTEST(MSLiteTest, OHOS_Context_CPU_0004, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 1);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 1);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Error case: CPU context with 0 threads; building the model is expected to
// be rejected with OH_AI_STATUS_LITE_NOT_SUPPORT.
HWTEST(MSLiteTest, OHOS_Context_CPU_0005, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 0);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 0);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_LITE_NOT_SUPPORT);
    OH_AI_ModelDestroy(&model);
}
// Normal case: CPU context, 4 threads, affinity mode 0 (no core binding).
HWTEST(MSLiteTest, OHOS_Context_CPU_0006, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 4);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 4);
    OH_AI_ContextSetThreadAffinityMode(context, 0);
    int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context);
    printf("==========thread_affinity_mode:%d\n", thread_affinity_mode);
    ASSERT_EQ(thread_affinity_mode, 0);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context, 4 threads, affinity mode 1 (bind big cores).
HWTEST(MSLiteTest, OHOS_Context_CPU_0007, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 4);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 4);
    OH_AI_ContextSetThreadAffinityMode(context, 1);
    int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context);
    printf("==========thread_affinity_mode:%d\n", thread_affinity_mode);
    ASSERT_EQ(thread_affinity_mode, 1);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context, 4 threads, affinity mode 2 (bind middle cores).
HWTEST(MSLiteTest, OHOS_Context_CPU_0008, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 4);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 4);
    OH_AI_ContextSetThreadAffinityMode(context, 2);
    int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context);
    printf("==========thread_affinity_mode:%d\n", thread_affinity_mode);
    ASSERT_EQ(thread_affinity_mode, 2);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Error case: invalid affinity mode 3; the subsequent model build is expected
// to fail with OH_AI_STATUS_LITE_NULLPTR.
HWTEST(MSLiteTest, OHOS_Context_CPU_0009, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 4);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 4);
    OH_AI_ContextSetThreadAffinityMode(context, 3);
    int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context);
    printf("==========thread_affinity_mode:%d\n", thread_affinity_mode);
    ASSERT_EQ(thread_affinity_mode, 3);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_LITE_NULLPTR);
    OH_AI_ModelDestroy(&model);
}
// Normal case: CPU context with explicit affinity core list {0,1,2,3};
// verifies the list round-trips through the context, then predicts.
HWTEST(MSLiteTest, OHOS_Context_CPU_0010, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 4);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 4);
    constexpr size_t core_num = 4;
    int32_t core_list[core_num] = {0, 1, 2, 3};
    OH_AI_ContextSetThreadAffinityCoreList(context, core_list, core_num);
    size_t ret_core_num;
    const int32_t *ret_core_list = nullptr;
    ret_core_list = OH_AI_ContextGetThreadAffinityCoreList(context, &ret_core_num);
    ASSERT_EQ(ret_core_num, core_num);
    for (size_t i = 0; i < ret_core_num; i++) {
        printf("==========ret_core_list:%d\n", ret_core_list[i]);
        ASSERT_EQ(ret_core_list[i], core_list[i]);
    }
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Normal case: both affinity mode (1) and an explicit core list {0,1,3,4} are
// set; the core list set last is the one read back.
HWTEST(MSLiteTest, OHOS_Context_CPU_0011, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 4);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 4);
    OH_AI_ContextSetThreadAffinityMode(context, 1);
    int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context);
    printf("==========thread_affinity_mode:%d\n", thread_affinity_mode);
    constexpr size_t core_num = 4;
    int32_t core_list[core_num] = {0, 1, 3, 4};
    OH_AI_ContextSetThreadAffinityCoreList(context, core_list, core_num);
    size_t ret_core_num;
    const int32_t *ret_core_list = nullptr;
    ret_core_list = OH_AI_ContextGetThreadAffinityCoreList(context, &ret_core_num);
    ASSERT_EQ(ret_core_num, core_num);
    for (size_t i = 0; i < ret_core_num; i++) {
        printf("==========ret_core_list:%d\n", ret_core_list[i]);
        ASSERT_EQ(ret_core_list[i], core_list[i]);
    }
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context with inter-op parallelism enabled.
HWTEST(MSLiteTest, OHOS_Context_CPU_0012, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 4);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 4);
    OH_AI_ContextSetEnableParallel(context, true);
    bool is_parallel = OH_AI_ContextGetEnableParallel(context);
    printf("==========is_parallel:%d\n", is_parallel);
    ASSERT_EQ(is_parallel, true);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context with inter-op parallelism disabled.
HWTEST(MSLiteTest, OHOS_Context_CPU_0013, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_ContextSetThreadNum(context, 4);
    int32_t thread_num = OH_AI_ContextGetThreadNum(context);
    printf("==========thread_num:%d\n", thread_num);
    ASSERT_EQ(thread_num, 4);
    OH_AI_ContextSetEnableParallel(context, false);
    bool is_parallel = OH_AI_ContextGetEnableParallel(context);
    printf("==========is_parallel:%d\n", is_parallel);
    ASSERT_EQ(is_parallel, false);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context with FP16 inference enabled.
HWTEST(MSLiteTest, OHOS_Context_CPU_0014, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    OH_AI_DeviceInfoHandle cpu_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    ASSERT_NE(cpu_info, nullptr);
    // Enable FP16 on the CPU device and verify the flag round-trips.
    OH_AI_DeviceInfoSetEnableFP16(cpu_info, true);
    bool fp16_enabled = OH_AI_DeviceInfoGetEnableFP16(cpu_info);
    printf("==========is_fp16:%d\n", fp16_enabled);
    ASSERT_EQ(fp16_enabled, true);
    OH_AI_ContextAddDeviceInfo(ctx, cpu_info);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    ModelPredict(model_handle, ctx, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context with FP16 inference disabled.
HWTEST(MSLiteTest, OHOS_Context_CPU_0015, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    OH_AI_DeviceInfoHandle cpu_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    ASSERT_NE(cpu_info, nullptr);
    // Keep FP32 inference: FP16 off, flag must read back false.
    OH_AI_DeviceInfoSetEnableFP16(cpu_info, false);
    bool fp16_enabled = OH_AI_DeviceInfoGetEnableFP16(cpu_info);
    printf("==========is_fp16:%d\n", fp16_enabled);
    ASSERT_EQ(fp16_enabled, false);
    OH_AI_ContextAddDeviceInfo(ctx, cpu_info);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    ModelPredict(model_handle, ctx, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context with a custom provider (vendor) name.
HWTEST(MSLiteTest, OHOS_Context_CPU_0016, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    OH_AI_DeviceInfoHandle cpu_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    ASSERT_NE(cpu_info, nullptr);
    // Provider name setter/getter round-trip.
    OH_AI_DeviceInfoSetProvider(cpu_info, "vendor_new");
    ASSERT_EQ(strcmp(OH_AI_DeviceInfoGetProvider(cpu_info), "vendor_new"), 0);
    OH_AI_ContextAddDeviceInfo(ctx, cpu_info);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    ModelPredict(model_handle, ctx, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context with a custom provider-device name.
HWTEST(MSLiteTest, OHOS_Context_CPU_0017, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    OH_AI_DeviceInfoHandle cpu_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    ASSERT_NE(cpu_info, nullptr);
    // Provider-device name setter/getter round-trip.
    OH_AI_DeviceInfoSetProviderDevice(cpu_info, "cpu_new");
    ASSERT_EQ(strcmp(OH_AI_DeviceInfoGetProviderDevice(cpu_info), "cpu_new"), 0);
    OH_AI_ContextAddDeviceInfo(ctx, cpu_info);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    ModelPredict(model_handle, ctx, "ml_face_isface", {}, false, true, false);
}
// Normal case: CPU context, then destroy the device-info handle.
HWTEST(MSLiteTest, OHOS_Context_CPU_0018, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    OH_AI_DeviceInfoHandle cpu_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    ASSERT_NE(cpu_info, nullptr);
    // The freshly created device info must report the CPU device type.
    OH_AI_DeviceType dev_type = OH_AI_DeviceInfoGetDeviceType(cpu_info);
    printf("==========device_type:%d\n", dev_type);
    ASSERT_EQ(dev_type, OH_AI_DEVICETYPE_CPU);
    OH_AI_ContextAddDeviceInfo(ctx, cpu_info);
    // Destroy must null out the caller's handle.
    OH_AI_DeviceInfoDestroy(&cpu_info);
    ASSERT_EQ(cpu_info, nullptr);
}
// Normal case: CPU context, then destroy the context handle.
HWTEST(MSLiteTest, OHOS_Context_CPU_0019, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    OH_AI_DeviceInfoHandle cpu_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    ASSERT_NE(cpu_info, nullptr);
    OH_AI_DeviceType dev_type = OH_AI_DeviceInfoGetDeviceType(cpu_info);
    printf("==========device_type:%d\n", dev_type);
    ASSERT_EQ(dev_type, OH_AI_DEVICETYPE_CPU);
    OH_AI_ContextAddDeviceInfo(ctx, cpu_info);
    // Destroy must null out the caller's handle.
    OH_AI_ContextDestroy(&ctx);
    ASSERT_EQ(ctx, nullptr);
}
// Error case: context carries no device info, so build must fail.
HWTEST(MSLiteTest, OHOS_Context_CPU_0020, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    // No AddContextDeviceCPU was called: build reports a null-pointer error.
    OH_AI_Status build_ret =
        OH_AI_ModelBuildFromFile(model_handle, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", build_ret);
    ASSERT_EQ(build_ret, OH_AI_STATUS_LITE_NULLPTR);
    OH_AI_ModelDestroy(&model_handle);
}
// Normal case: ModelBuild via the in-memory (pointer) API.
HWTEST(MSLiteTest, OHOS_Model_Build_0001, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    // build_from_buffer=true exercises OH_AI_ModelBuild on raw model bytes.
    ModelPredict(model_handle, ctx, "ml_face_isface", {}, true, true, false);
}
// Error case: ModelBuild with a buffer that is not model data.
HWTEST(MSLiteTest, OHOS_Model_Build_0002, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t data_len;
    // Deliberately load an *input tensor* file, not a .ms model.
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &data_len);
    ASSERT_NE(input_buf, nullptr);
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status build_ret = OH_AI_ModelBuild(model_handle, input_buf, data_len, OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", build_ret);
    ASSERT_EQ(build_ret, OH_AI_STATUS_LITE_ERROR);
    delete[] input_buf;
    OH_AI_ModelDestroy(&model_handle);
}
// Error case: ModelBuild with a null model_data pointer.
HWTEST(MSLiteTest, OHOS_Model_Build_0003, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    // nullptr data + zero size must yield a null-pointer status.
    OH_AI_Status build_ret = OH_AI_ModelBuild(model_handle, nullptr, 0, OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", build_ret);
    ASSERT_EQ(build_ret, OH_AI_STATUS_LITE_NULLPTR);
    OH_AI_ModelDestroy(&model_handle);
}
// Error case: ModelBuild with data_size == 0.
HWTEST(MSLiteTest, OHOS_Model_Build_0004, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t graph_len;
    const char *graph_path = "/data/test/ml_face_isface.ms";
    char *graph_buf = ReadFile(graph_path, &graph_len);
    ASSERT_NE(graph_buf, nullptr);
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    // Valid buffer but zero length: build must fail with a generic error.
    OH_AI_Status build_ret = OH_AI_ModelBuild(model_handle, graph_buf, 0, OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", build_ret);
    ASSERT_EQ(build_ret, OH_AI_STATUS_LITE_ERROR);
    delete[] graph_buf;
    OH_AI_ModelDestroy(&model_handle);
}
// Error case: ModelBuildFromFile with a path that is not a model file.
HWTEST(MSLiteTest, OHOS_Model_Build_0005, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    // The .input file exists but is not a serialized model.
    OH_AI_Status build_ret =
        OH_AI_ModelBuildFromFile(model_handle, "/data/test/ml_face_isface.input", OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", build_ret);
    ASSERT_EQ(build_ret, OH_AI_STATUS_LITE_ERROR);
    OH_AI_ModelDestroy(&model_handle);
}
// Error case: ModelBuildFromFile with an empty path string.
HWTEST(MSLiteTest, OHOS_Model_Build_0006, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status build_ret = OH_AI_ModelBuildFromFile(model_handle, "", OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", build_ret);
    ASSERT_EQ(build_ret, OH_AI_STATUS_LITE_ERROR);
    OH_AI_ModelDestroy(&model_handle);
}
// Error case: ModelBuildFromFile with an unsupported model type.
HWTEST(MSLiteTest, OHOS_Model_Build_0007, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    // OH_AI_MODELTYPE_INVALID must be rejected with a parameter error.
    OH_AI_Status build_ret =
        OH_AI_ModelBuildFromFile(model_handle, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_INVALID, ctx);
    printf("==========build model return code:%d\n", build_ret);
    ASSERT_EQ(build_ret, OH_AI_STATUS_LITE_PARAM_INVALID);
    OH_AI_ModelDestroy(&model_handle);
}
// Error case: ModelBuildFromFile with a null context.
HWTEST(MSLiteTest, OHOS_Model_Build_0008, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    // The prepared context is intentionally ignored; nullptr is passed instead.
    OH_AI_Status build_ret =
        OH_AI_ModelBuildFromFile(model_handle, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, nullptr);
    printf("==========build model return code:%d\n", build_ret);
    ASSERT_EQ(build_ret, OH_AI_STATUS_LITE_NULLPTR);
    OH_AI_ModelDestroy(&model_handle);
}
// Normal case: ModelResize to the same shape the model already has.
HWTEST(MSLiteTest, OHOS_Model_Resize_0001, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    // Passing the model's native input shape exercises the resize path inside the helper.
    ModelPredict(model_handle, ctx, "ml_ocr_cn", {4, {1, 32, 512, 1}}, false, true, false);
}
// Normal case: ModelResize to a shape different from the model's native one.
HWTEST(MSLiteTest, OHOS_Model_Resize_0002, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_ocr_cn.ms", OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    printf("==========Resizes==========\n");
    // New shape differs from the native {1, 32, 512, 1}; resize must still succeed.
    OH_AI_ShapeInfo shape_infos = {4, {1, 64, 256, 1}};
    OH_AI_Status resize_ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num);
    printf("==========Resizes return code:%d\n", resize_ret);
    ASSERT_EQ(resize_ret, OH_AI_STATUS_SUCCESS);
    FillInputsData(inputs, "ml_ocr_cn", false);
    OH_AI_TensorHandleArray outputs;
    printf("==========Model Predict==========\n");
    OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
    // Fix: release the model like every sibling test does — the original leaked it.
    // (No output comparison here: the resized shape has no golden data.)
    OH_AI_ModelDestroy(&model);
}
// Error case: ModelResize with a 3-dimensional shape (model input is 4-D).
HWTEST(MSLiteTest, OHOS_Model_Resize_0003, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_ocr_cn.ms", OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    printf("==========Resizes==========\n");
    // Fix: the original wrote {4, {1, 32, 1}} — shape_num said 4 dims while only
    // 3 values were given (the 4th silently zero-initialized), so the test was
    // really resizing to {1, 32, 1, 0}. Use shape_num = 3 to match the intent
    // of a genuinely 3-dimensional shape; resize must still be rejected.
    OH_AI_ShapeInfo shape_infos = {3, {1, 32, 1}};
    ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num);
    printf("==========Resizes return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
    OH_AI_ModelDestroy(&model);
}
// Error case: ModelResize with a negative dimension in the shape.
HWTEST(MSLiteTest, OHOS_Model_Resize_0004, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status status = OH_AI_ModelBuildFromFile(model_handle, "/data/test/ml_ocr_cn.ms", OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", status);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray model_inputs = OH_AI_ModelGetInputs(model_handle);
    ASSERT_NE(model_inputs.handle_list, nullptr);
    printf("==========Resizes==========\n");
    // A negative dimension (-32) is invalid and must be rejected.
    OH_AI_ShapeInfo bad_shape = {4, {1, -32, 32, 1}};
    status = OH_AI_ModelResize(model_handle, model_inputs, &bad_shape, model_inputs.handle_num);
    printf("==========Resizes return code:%d\n", status);
    ASSERT_EQ(status, OH_AI_STATUS_LITE_ERROR);
    OH_AI_ModelDestroy(&model_handle);
}
// Error case: ModelResize on a model that does not support resizing.
HWTEST(MSLiteTest, OHOS_Model_Resize_0005, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status status =
        OH_AI_ModelBuildFromFile(model_handle, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", status);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray model_inputs = OH_AI_ModelGetInputs(model_handle);
    ASSERT_NE(model_inputs.handle_list, nullptr);
    printf("==========Resizes==========\n");
    // ml_face_isface is a fixed-shape model; resize must fail.
    OH_AI_ShapeInfo new_shape = {4, {1, 96, 96, 1}};
    status = OH_AI_ModelResize(model_handle, model_inputs, &new_shape, model_inputs.handle_num);
    printf("==========Resizes return code:%d\n", status);
    ASSERT_EQ(status, OH_AI_STATUS_LITE_ERROR);
    OH_AI_ModelDestroy(&model_handle);
}
// Normal case: plain ModelPredict round-trip on CPU.
HWTEST(MSLiteTest, OHOS_Model_Predict_0001, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    ModelPredict(model_handle, ctx, "ml_face_isface", {}, false, true, false);
}
// Error case: ModelPredict after the model has been destroyed.
HWTEST(MSLiteTest, OHOS_Model_Predict_0002, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status status =
        OH_AI_ModelBuildFromFile(model_handle, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", status);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray model_inputs = OH_AI_ModelGetInputs(model_handle);
    ASSERT_NE(model_inputs.handle_list, nullptr);
    FillInputsData(model_inputs, "ml_face_isface", true);
    printf("==========Model Predict==========\n");
    OH_AI_TensorHandleArray model_outputs;
    // Destroy first: the handle is nulled, so predict must report NULLPTR.
    OH_AI_ModelDestroy(&model_handle);
    status = OH_AI_ModelPredict(model_handle, model_inputs, &model_outputs, nullptr, nullptr);
    printf("==========Model Predict return code:%d\n", status);
    ASSERT_EQ(status, OH_AI_STATUS_LITE_NULLPTR);
}
// Error case: ModelPredict with an empty inputs array.
HWTEST(MSLiteTest, OHOS_Model_Predict_0003, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========Model Predict==========\n");
    // Fix: the original left `inputs` uninitialized, so predict was fed
    // indeterminate values (undefined behavior). Zero-initialize so the test
    // deterministically exercises the documented "empty inputs" error path.
    OH_AI_TensorHandleArray inputs = {0, nullptr};
    OH_AI_TensorHandleArray outputs;
    ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    printf("==========Model Predict return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
    OH_AI_ModelDestroy(&model);
}
// Normal case: ModelPredict with before/after callbacks supplied.
HWTEST(MSLiteTest, OHOS_Model_Predict_0004, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    // Last argument enables the helper's predict callbacks.
    ModelPredict(model_handle, ctx, "ml_face_isface", {}, false, true, true);
}
// Normal case: look up the input tensor by name, feed it, and run inference.
HWTEST(MSLiteTest, OHOS_Model_GetInputByTensorName_0001, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t size1;
    size_t *ptr_size1 = &size1;
    const char *imagePath = "/data/test/ml_face_isface.input";
    char *imageBuf = ReadFile(imagePath, ptr_size1);
    ASSERT_NE(imageBuf, nullptr);
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    // "data" is the model's input tensor name.
    OH_AI_TensorHandle tensor = OH_AI_ModelGetInputByTensorName(model, "data");
    ASSERT_NE(tensor, nullptr);
    int64_t element_num = OH_AI_TensorGetElementNum(tensor);
    printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
    float *input_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(tensor));
    ASSERT_NE(input_data, nullptr);
    printf("==========Transpose==========\n");
    size_t shape_num;
    const int64_t *shape = OH_AI_TensorGetShape(tensor, &shape_num);
    // Input file is NCHW; convert to the NHWC layout the tensor expects.
    auto imageBuf_nhwc = new char[size1];
    PackNCHWToNHWCFp32(imageBuf, imageBuf_nhwc, shape[0], shape[1] * shape[2], shape[3]);
    memcpy(input_data, imageBuf_nhwc, size1);
    // Fix: the original leaked this scratch buffer — free it once copied.
    delete[] imageBuf_nhwc;
    printf("input data is:");
    for (int j = 0; j < element_num && j <= 20; ++j) {
        printf("%f ", input_data[j]);
    }
    printf("\n");
    printf("==========Model Predict==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    OH_AI_TensorHandleArray outputs;
    ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    CompareResult(outputs, "ml_face_isface");
    delete[] imageBuf;
    OH_AI_ModelDestroy(&model);
}
// Error case: GetInputByTensorName with a name that does not exist.
HWTEST(MSLiteTest, OHOS_Model_GetInputByTensorName_0002, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status status =
        OH_AI_ModelBuildFromFile(model_handle, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", status);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    // "aaa" is not an input tensor of this model, so the lookup yields null.
    OH_AI_TensorHandle missing_tensor = OH_AI_ModelGetInputByTensorName(model_handle, "aaa");
    ASSERT_EQ(missing_tensor, nullptr);
    OH_AI_ModelDestroy(&model_handle);
}
// Normal case: run inference, then fetch the output tensor by name and verify it.
HWTEST(MSLiteTest, OHOS_Model_GetOutputByTensorName_0001, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status status =
        OH_AI_ModelBuildFromFile(model_handle, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", status);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray model_inputs = OH_AI_ModelGetInputs(model_handle);
    ASSERT_NE(model_inputs.handle_list, nullptr);
    FillInputsData(model_inputs, "ml_face_isface", true);
    printf("==========Model Predict==========\n");
    OH_AI_TensorHandleArray model_outputs;
    status = OH_AI_ModelPredict(model_handle, model_inputs, &model_outputs, nullptr, nullptr);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetOutput==========\n");
    // "prob" is the model's output tensor name.
    OH_AI_TensorHandle out_tensor = OH_AI_ModelGetOutputByTensorName(model_handle, "prob");
    ASSERT_NE(out_tensor, nullptr);
    int64_t element_num = OH_AI_TensorGetElementNum(out_tensor);
    printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(out_tensor), element_num);
    float *output_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(out_tensor));
    printf("output data is:");
    // Dump at most the first 21 values for debugging.
    for (int idx = 0; idx < element_num && idx <= 20; ++idx) {
        printf("%f ", output_data[idx]);
    }
    printf("\n");
    printf("==========compFp32WithTData==========\n");
    // Compare against the golden output with 1% relative/absolute tolerance.
    bool result = compFp32WithTData(output_data, "/data/test/ml_face_isface0.output", 0.01, 0.01, false);
    EXPECT_EQ(result, true);
    OH_AI_ModelDestroy(&model_handle);
}
// Error case: GetOutputByTensorName with a name that does not exist.
HWTEST(MSLiteTest, OHOS_Model_GetOutputByTensorName_0002, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model_handle = OH_AI_ModelCreate();
    ASSERT_NE(model_handle, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status status =
        OH_AI_ModelBuildFromFile(model_handle, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, ctx);
    printf("==========build model return code:%d\n", status);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray model_inputs = OH_AI_ModelGetInputs(model_handle);
    ASSERT_NE(model_inputs.handle_list, nullptr);
    FillInputsData(model_inputs, "ml_face_isface", true);
    printf("==========Model Predict==========\n");
    OH_AI_TensorHandleArray model_outputs;
    status = OH_AI_ModelPredict(model_handle, model_inputs, &model_outputs, nullptr, nullptr);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetOutput==========\n");
    // "aaa" is not an output tensor of this model, so the lookup yields null.
    OH_AI_TensorHandle missing_tensor = OH_AI_ModelGetOutputByTensorName(model_handle, "aaa");
    ASSERT_EQ(missing_tensor, nullptr);
    OH_AI_ModelDestroy(&model_handle);
}
// Normal case: create a tensor with OH_AI_TensorCreate and use it as model input.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0001, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    // Shape matches the model's expected NHWC input: 1 x 48 x 48 x 3 float32.
    constexpr size_t create_shape_num = 4;
    int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
    // data == nullptr: the tensor allocates its own buffer of the given byte size.
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
                                                   create_shape_num, nullptr, static_cast<int>(1 * 48 * 48 * 3 * sizeof(float)));
    ASSERT_NE(tensor, nullptr);
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    // NOTE(review): this overwrites entry 0 of the array returned by
    // OH_AI_ModelGetInputs with a user-created tensor. Ownership of both the
    // replaced (model-owned) tensor and the new one is unclear from this file —
    // the created tensor is never destroyed, which looks like a leak, but
    // destroying it here could conflict with OH_AI_ModelDestroy. Confirm
    // against the OH_AI tensor-ownership contract before changing.
    inputs.handle_list[0] = tensor;
    FillInputsData(inputs, "ml_face_isface", true);
    printf("==========Model Predict==========\n");
    OH_AI_TensorHandleArray outputs;
    ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    CompareResult(outputs, "ml_face_isface");
    OH_AI_ModelDestroy(&model);
}
// Normal case: destroy a created tensor; the handle must be nulled.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0002, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t data_len;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &data_len);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dim_count = 4;
    int64_t dims[dim_count] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims,
                                                   dim_count, input_buf, data_len);
    ASSERT_NE(tensor, nullptr);
    delete[] input_buf;
    // Destroy must null out the caller's handle.
    OH_AI_TensorDestroy(&tensor);
    ASSERT_EQ(tensor, nullptr);
}
// Normal case: the tensor name given at creation is readable back.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0003, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t data_len;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &data_len);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dim_count = 4;
    int64_t dims[dim_count] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims,
                                                   dim_count, input_buf, data_len);
    ASSERT_NE(tensor, nullptr);
    const char *name = OH_AI_TensorGetName(tensor);
    ASSERT_EQ(strcmp(name, "data"), 0);
    delete[] input_buf;
    OH_AI_TensorDestroy(&tensor);
}
// Normal case: OH_AI_TensorSetName replaces the tensor's name.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0004, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t data_len;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &data_len);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dim_count = 4;
    int64_t dims[dim_count] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims,
                                                   dim_count, input_buf, data_len);
    ASSERT_NE(tensor, nullptr);
    // Rename and confirm the getter reflects the new name.
    OH_AI_TensorSetName(tensor, "new_data");
    const char *name = OH_AI_TensorGetName(tensor);
    ASSERT_EQ(strcmp(name, "new_data"), 0);
    delete[] input_buf;
    OH_AI_TensorDestroy(&tensor);
}
// Normal case: the data type given at creation is readable back.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0005, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t data_len;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &data_len);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dim_count = 4;
    int64_t dims[dim_count] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims,
                                                   dim_count, input_buf, data_len);
    ASSERT_NE(tensor, nullptr);
    OH_AI_DataType dtype = OH_AI_TensorGetDataType(tensor);
    ASSERT_EQ(dtype, OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
    delete[] input_buf;
    OH_AI_TensorDestroy(&tensor);
}
// Normal case: OH_AI_TensorSetDataType changes the tensor's data type.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0006, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t data_len;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &data_len);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dim_count = 4;
    int64_t dims[dim_count] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims,
                                                   dim_count, input_buf, data_len);
    ASSERT_NE(tensor, nullptr);
    // Switch FLOAT32 -> FLOAT16 and confirm the getter reflects it.
    OH_AI_TensorSetDataType(tensor, OH_AI_DATATYPE_NUMBERTYPE_FLOAT16);
    OH_AI_DataType dtype = OH_AI_TensorGetDataType(tensor);
    ASSERT_EQ(dtype, OH_AI_DATATYPE_NUMBERTYPE_FLOAT16);
    delete[] input_buf;
    OH_AI_TensorDestroy(&tensor);
}
// Normal case: the shape given at creation is readable back element-by-element.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0007, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t data_len;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &data_len);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dim_count = 4;
    int64_t dims[dim_count] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims,
                                                   dim_count, input_buf, data_len);
    ASSERT_NE(tensor, nullptr);
    size_t reported_dim_count;
    const int64_t *reported_dims = OH_AI_TensorGetShape(tensor, &reported_dim_count);
    ASSERT_EQ(reported_dim_count, dim_count);
    for (size_t idx = 0; idx < reported_dim_count; idx++) {
        ASSERT_EQ(reported_dims[idx], dims[idx]);
    }
    delete[] input_buf;
    OH_AI_TensorDestroy(&tensor);
}
// Normal case: OH_AI_TensorSetShape replaces the tensor's shape.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0008, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t data_len;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &data_len);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dim_count = 4;
    int64_t dims[dim_count] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims,
                                                   dim_count, input_buf, data_len);
    ASSERT_NE(tensor, nullptr);
    // Sanity-check the original shape first.
    size_t reported_dim_count;
    const int64_t *reported_dims = OH_AI_TensorGetShape(tensor, &reported_dim_count);
    ASSERT_EQ(reported_dim_count, dim_count);
    for (size_t idx = 0; idx < reported_dim_count; idx++) {
        ASSERT_EQ(reported_dims[idx], dims[idx]);
    }
    // Now overwrite the shape and verify the getter reports the new one.
    constexpr size_t new_dim_count = 4;
    int64_t new_dims[new_dim_count] = {1, 32, 32, 1};
    OH_AI_TensorSetShape(tensor, new_dims, new_dim_count);
    size_t updated_dim_count;
    const int64_t *updated_dims = OH_AI_TensorGetShape(tensor, &updated_dim_count);
    ASSERT_EQ(updated_dim_count, new_dim_count);
    for (size_t idx = 0; idx < updated_dim_count; idx++) {
        ASSERT_EQ(updated_dims[idx], new_dims[idx]);
    }
    delete[] input_buf;
    OH_AI_TensorDestroy(&tensor);
}
// Normal scenario: MSTensorGetFormat — a freshly created tensor reports NCHW.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0009, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t file_size = 0;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &file_size);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dims_num = 4;
    int64_t dims[dims_num] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor =
        OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims, dims_num, input_buf, file_size);
    ASSERT_NE(tensor, nullptr);
    // No format was set explicitly, so the default (NCHW) is expected.
    OH_AI_Format data_format = OH_AI_TensorGetFormat(tensor);
    ASSERT_EQ(data_format, OH_AI_FORMAT_NCHW);
    delete[] input_buf;
    OH_AI_TensorDestroy(&tensor);
}
// Normal scenario: MSTensorSetFormat — set the tensor format and read it back.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0010, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t file_size = 0;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &file_size);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dims_num = 4;
    int64_t dims[dims_num] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor =
        OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims, dims_num, input_buf, file_size);
    ASSERT_NE(tensor, nullptr);
    // Switch to NHWC and verify the getter observes the change.
    OH_AI_TensorSetFormat(tensor, OH_AI_FORMAT_NHWC);
    OH_AI_Format data_format = OH_AI_TensorGetFormat(tensor);
    ASSERT_EQ(data_format, OH_AI_FORMAT_NHWC);
    delete[] input_buf;
    OH_AI_TensorDestroy(&tensor);
}
// Normal scenario: MSTensorGetData — read back the tensor's data pointer.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0011, Function | MediumTest | Level1) {
printf("==========ReadFile==========\n");
size_t size1;
size_t *ptr_size1 = &size1;
const char *imagePath = "/data/test/ml_face_isface.input";
char *imageBuf = ReadFile(imagePath, ptr_size1);
ASSERT_NE(imageBuf, nullptr);
printf("==========OH_AI_TensorCreate==========\n");
constexpr size_t create_shape_num = 4;
int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
                                               create_shape_num, imageBuf, size1);
ASSERT_NE(tensor, nullptr);
const float *ret_data = static_cast<const float *>(OH_AI_TensorGetData(tensor));
ASSERT_NE(ret_data, nullptr);
// Print the first 20 values for manual inspection only; no assertion on content.
printf("return data is:");
for (int i = 0; i < 20; ++i) {
printf("%f ", ret_data[i]);
}
printf("\n");
delete[] imageBuf;
OH_AI_TensorDestroy(&tensor);
}
// Normal scenario: MSTensorSetData — point the tensor at a caller-owned buffer.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0012, Function | MediumTest | Level1) {
printf("==========ReadFile==========\n");
size_t size1;
size_t *ptr_size1 = &size1;
const char *imagePath = "/data/test/ml_face_isface.input";
char *imageBuf = ReadFile(imagePath, ptr_size1);
ASSERT_NE(imageBuf, nullptr);
printf("==========OH_AI_TensorCreate==========\n");
constexpr size_t create_shape_num = 4;
int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
                                               create_shape_num, imageBuf, size1);
ASSERT_NE(tensor, nullptr);
// NOTE(review): a stack array is handed to OH_AI_TensorSetData and the tensor
// is destroyed afterwards; this assumes TensorDestroy does not free data set
// this way — confirm the ownership contract in the C API.
constexpr size_t data_len = 6;
float data[data_len] = {1, 2, 3, 4, 5, 6};
OH_AI_TensorSetData(tensor, data);
const float *ret_data = static_cast<const float *>(OH_AI_TensorGetData(tensor));
ASSERT_NE(ret_data, nullptr);
printf("return data is:");
for (size_t i = 0; i < data_len; i++) {
ASSERT_EQ(ret_data[i], data[i]);
printf("%f ", ret_data[i]);
}
printf("\n");
delete[] imageBuf;
OH_AI_TensorDestroy(&tensor);
}
// Normal scenario: MSTensorGetElementNum — element count equals 1*48*48*3 = 6912.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0013, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t file_size = 0;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &file_size);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dims_num = 4;
    int64_t dims[dims_num] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor =
        OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims, dims_num, input_buf, file_size);
    ASSERT_NE(tensor, nullptr);
    int64_t element_num = OH_AI_TensorGetElementNum(tensor);
    printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
    ASSERT_EQ(element_num, 6912);
    delete[] input_buf;
    OH_AI_TensorDestroy(&tensor);
}
// Normal scenario: MSTensorGetDataSize — byte size equals element count * sizeof(float).
HWTEST(MSLiteTest, OHOS_Tensor_Create_0014, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t file_size = 0;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &file_size);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dims_num = 4;
    int64_t dims[dims_num] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor =
        OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims, dims_num, input_buf, file_size);
    ASSERT_NE(tensor, nullptr);
    size_t data_size = OH_AI_TensorGetDataSize(tensor);
    printf("Tensor data size: %zu.\n", data_size);
    ASSERT_EQ(data_size, 6912 * sizeof(float));
    delete[] input_buf;
    OH_AI_TensorDestroy(&tensor);
}
// Normal scenario: MSTensorGetMutableData — obtain a writable data pointer.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0015, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t file_size = 0;
    const char *input_path = "/data/test/ml_face_isface.input";
    char *input_buf = ReadFile(input_path, &file_size);
    ASSERT_NE(input_buf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t dims_num = 4;
    int64_t dims[dims_num] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor =
        OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, dims, dims_num, input_buf, file_size);
    ASSERT_NE(tensor, nullptr);
    // GetMutableData must hand back a non-null, writable buffer.
    float *writable = static_cast<float *>(OH_AI_TensorGetMutableData(tensor));
    ASSERT_NE(writable, nullptr);
    delete[] input_buf;
    OH_AI_TensorDestroy(&tensor);
}
// Normal scenario: MSTensorClone — deep-copy a tensor.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0016, Function | MediumTest | Level1) {
printf("==========ReadFile==========\n");
size_t size1;
size_t *ptr_size1 = &size1;
const char *imagePath = "/data/test/ml_face_isface.input";
char *imageBuf = ReadFile(imagePath, ptr_size1);
ASSERT_NE(imageBuf, nullptr);
printf("==========OH_AI_TensorCreate==========\n");
constexpr size_t create_shape_num = 4;
int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
                                               create_shape_num, imageBuf, size1);
ASSERT_NE(tensor, nullptr);
// The clone is a distinct handle whose name gets a "_duplicate" suffix.
OH_AI_TensorHandle clone = OH_AI_TensorClone(tensor);
ASSERT_NE(clone, nullptr);
ASSERT_EQ(strcmp(OH_AI_TensorGetName(clone), "data_duplicate"), 0);
delete[] imageBuf;
OH_AI_TensorDestroy(&tensor);
OH_AI_TensorDestroy(&clone);
}
// Normal scenario: single-input model.
HWTEST(MSLiteTest, OHOS_Input_0001, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle handle = OH_AI_ModelCreate();
    ASSERT_NE(handle, nullptr);
    // Build/fill/predict/compare is delegated to the shared helper.
    ModelPredict(handle, ctx, "ml_face_isface", {}, false, true, false);
}
// Normal scenario: multi-input model.
HWTEST(MSLiteTest, OHOS_Input_0002, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle ctx = OH_AI_ContextCreate();
    ASSERT_NE(ctx, nullptr);
    AddContextDeviceCPU(ctx);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle handle = OH_AI_ModelCreate();
    ASSERT_NE(handle, nullptr);
    // Build/fill/predict/compare is delegated to the shared helper.
    ModelPredict(handle, ctx, "ml_headpose_pb2tflite", {}, false, false, false);
}
// Normal scenario: model whose input tensor holds uint8 data.
HWTEST(MSLiteTest, OHOS_Input_0003, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t size1;
    size_t *ptr_size1 = &size1;
    const char *imagePath = "/data/test/aiy_vision_classifier_plants_V1_3.input";
    char *imageBuf = ReadFile(imagePath, ptr_size1);
    ASSERT_NE(imageBuf, nullptr);
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/aiy_vision_classifier_plants_V1_3.ms", OH_AI_MODELTYPE_MINDIR,
                                                context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    for (size_t i = 0; i < inputs.handle_num; ++i) {
        OH_AI_TensorHandle tensor = inputs.handle_list[i];
        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
        printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
        void *input_data = OH_AI_TensorGetMutableData(inputs.handle_list[i]);
        ASSERT_NE(input_data, nullptr);
        // Guard the copy: writing more bytes than the tensor buffer holds would
        // corrupt the heap (the old code called memcpy unconditionally).
        ASSERT_LE(size1, OH_AI_TensorGetDataSize(tensor));
        memcpy(input_data, imageBuf, size1);
    }
    printf("==========Model Predict==========\n");
    OH_AI_TensorHandleArray outputs;
    ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetOutput==========\n");
    for (size_t i = 0; i < outputs.handle_num; ++i) {
        OH_AI_TensorHandle tensor = outputs.handle_list[i];
        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
        printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
        uint8_t *output_data = reinterpret_cast<uint8_t *>(OH_AI_TensorGetMutableData(tensor));
        printf("output data is:");
        for (int j = 0; j < element_num && j <= 20; ++j) {
            printf("%d ", output_data[j]);
        }
        printf("\n");
        printf("==========compFp32WithTData==========\n");
        // Each output tensor is checked against its own expectation file.
        string expectedDataFile = "/data/test/aiy_vision_classifier_plants_V1_3" + std::to_string(i) + ".output";
        bool result = compUint8WithTData(output_data, expectedDataFile, 0.01, 0.01, false);
        EXPECT_EQ(result, true);
    }
    delete[] imageBuf;
    OH_AI_ModelDestroy(&model);
}
// Normal scenario: quantized model.
// NOTE(review): this test is identical to OHOS_Input_0001 and loads the same
// "ml_face_isface" model — confirm a quantized model file was intended here.
HWTEST(MSLiteTest, OHOS_Input_0004, Function | MediumTest | Level1) {
printf("==========Init Context==========\n");
OH_AI_ContextHandle context = OH_AI_ContextCreate();
ASSERT_NE(context, nullptr);
AddContextDeviceCPU(context);
printf("==========Create model==========\n");
OH_AI_ModelHandle model = OH_AI_ModelCreate();
ASSERT_NE(model, nullptr);
ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
// Normal scenario: run the complete inference flow repeatedly.
HWTEST(MSLiteTest, OHOS_Multiple_0001, Function | MediumTest | Level1) {
    constexpr size_t kRuns = 50;
    for (size_t run = 0; run < kRuns; ++run) {
        Predict_CPU();
    }
}
// Abnormal scenario: create the model once but call Build twice.
// NOTE(review): despite being labeled "abnormal", the second build is asserted
// to return SUCCESS — confirm this matches the runtime's rebuild contract.
HWTEST(MSLiteTest, OHOS_Multiple_0002, Function | MediumTest | Level1) {
printf("==========Init Context==========\n");
OH_AI_ContextHandle context = OH_AI_ContextCreate();
ASSERT_NE(context, nullptr);
AddContextDeviceCPU(context);
printf("==========Create model==========\n");
OH_AI_ModelHandle model = OH_AI_ModelCreate();
ASSERT_NE(model, nullptr);
printf("==========Build model==========\n");
OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
printf("==========build model return code:%d\n", ret);
ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
printf("==========Build model==========\n")
OH_AI_Status ret2 = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
printf("==========build model return code:%d\n", ret2);
ASSERT_EQ(ret2, OH_AI_STATUS_SUCCESS);
OH_AI_ModelDestroy(&model);
}
// Normal scenario: one model creation, one build, then 50 Predict calls.
HWTEST(MSLiteTest, OHOS_Multiple_0003, Function | MediumTest | Level1) {
printf("==========Init Context==========\n");
OH_AI_ContextHandle context = OH_AI_ContextCreate();
ASSERT_NE(context, nullptr);
AddContextDeviceCPU(context);
printf("==========Create model==========\n");
OH_AI_ModelHandle model = OH_AI_ModelCreate();
ASSERT_NE(model, nullptr);
printf("==========Build model==========\n");
OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
printf("==========build model return code:%d\n", ret);
ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
printf("==========GetInputs==========\n");
OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
ASSERT_NE(inputs.handle_list, nullptr);
FillInputsData(inputs, "ml_face_isface", true);
OH_AI_TensorHandleArray outputs;
for (size_t i = 0; i < 50; ++i) {
printf("==========Model Predict==========\n");
OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
}
// Only the outputs of the final iteration are compared against expectations.
CompareResult(outputs, "ml_face_isface");
OH_AI_ModelDestroy(&model);
}
// Normal scenario: create and destroy a model repeatedly (50 iterations).
HWTEST(MSLiteTest, OHOS_Multiple_0004, Function | MediumTest | Level1) {
    for (size_t i = 0; i < 50; ++i) {
        printf("==========Init Context==========\n");
        OH_AI_ContextHandle context = OH_AI_ContextCreate();
        ASSERT_NE(context, nullptr);
        AddContextDeviceCPU(context);
        printf("==========Create model==========\n");
        OH_AI_ModelHandle model = OH_AI_ModelCreate();
        ASSERT_NE(model, nullptr);
        printf("==========Build model==========\n");
        OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
        printf("==========build model return code:%d\n", ret);
        ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
        // Fixed copy-paste defect: the old code printed a second "Build model"
        // banner here even though the next step only destroys the model.
        printf("==========Destroy model==========\n");
        OH_AI_ModelDestroy(&model);
    }
}
// Normal scenario: two models run CPU inference concurrently on separate threads.
HWTEST(MSLiteTest, OHOS_Parallel_0001, Function | MediumTest | Level1) {
    std::cout << "run start" << std::endl;
    std::thread worker_a(Predict_CPU);
    std::cout << "1111111111111" << std::endl;
    std::thread worker_b(Predict_CPU);
    std::cout << "2222222222222" << std::endl;
    // Wait for both inference threads before the test ends.
    worker_a.join();
    worker_b.join();
}
// Normal scenario: a model converted with r1.3 tooling runs on the r1.5 runtime.
HWTEST(MSLiteTest, OHOS_Compatible_0001, Function | MediumTest | Level1) {
printf("==========Init Context==========\n");
OH_AI_ContextHandle context = OH_AI_ContextCreate();
ASSERT_NE(context, nullptr);
AddContextDeviceCPU(context);
printf("==========Create model==========\n");
OH_AI_ModelHandle model = OH_AI_ModelCreate();
ASSERT_NE(model, nullptr);
printf("==========Build model==========\n");
// The r1.3-converted model file; inputs/expectations reuse the regular
// ml_face_isface data set.
OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface_r13.ms",
                                            OH_AI_MODELTYPE_MINDIR, context);
printf("==========build model return code:%d\n", ret);
ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
printf("==========GetInputs==========\n");
OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
ASSERT_NE(inputs.handle_list, nullptr);
FillInputsData(inputs, "ml_face_isface", true);
printf("==========Model Predict==========\n");
OH_AI_TensorHandleArray outputs;
ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
CompareResult(outputs, "ml_face_isface");
OH_AI_ModelDestroy(&model);
}
\ No newline at end of file
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ohos_common.h"
#include <numeric>
#include <inttypes.h>
/*
 * getDimInfo: get dim info from data file(int64_t)
 * param:
 * fp: the testing datafile object
 *
 * return :
 * dim_info: array to store the info of the dim in datafile, like
 * [4,3,3,6,3,162(3*3*6*3)],4 is dim size,3,3,6,3 is the dim shape data_size:
 * the size of the testing data including the data file
 * On any error (allocation failure, unreadable or malformed header) dim_info
 * is left untouched.
 * */
void getDimInfo(FILE *fp, std::vector<int64_t>* dim_info) {
    const int MAX_HEAD_SIZE = 50;
    uint32_t *dim_buffer = reinterpret_cast<uint32_t *>(malloc(MAX_HEAD_SIZE * sizeof(uint32_t)));
    if (dim_buffer == nullptr) {  // malloc can fail; old code dereferenced unconditionally
        return;
    }
    size_t ret = fread(dim_buffer, sizeof(uint32_t), MAX_HEAD_SIZE, fp);
    if (ret == 0) {
        free(dim_buffer);
        return;
    }
    uint32_t dim_count = *dim_buffer;  // first word is the number of dims
    // Reject headers whose dim count would index past the words we actually
    // read — the old loop could walk off the end of dim_buffer.
    if (dim_count >= static_cast<uint32_t>(MAX_HEAD_SIZE) || dim_count + 1 > ret) {
        free(dim_buffer);
        return;
    }
    dim_info->push_back(dim_count); // get dim size
    // get data shape to compute the datasize
    uint64_t data_size = 1;
    for (uint32_t i = 1; i <= dim_count; i++) {
        dim_info->push_back(dim_buffer[i]);
        data_size *= dim_buffer[i];
    }
    dim_info->push_back(data_size);
    free(dim_buffer);
}
/*
 * readTestDataFile: read test date from hisi .t datafile(int64_t)
 * param:
 * infile: the path of hisi .t datafile
 * dim_info1: out — receives the shape (header dims without the leading count)
 * return:
 * malloc()'ed buffer holding the payload (caller must free()), or nullptr on
 * any error (missing file, bad header, allocation failure, short read)
 * */
void *readTestDataFile(std::string infile, std::vector<int64_t>* dim_info1) {
    printf("\n [common.cpp] Loading data from: %s\n", infile.c_str());
    FILE *fp = fopen(infile.c_str(), "r");
    if (fp == nullptr) {
        printf("ERROR: cant't open file %s\n", infile.c_str());
        return nullptr;
    }
    // First pass: parse the dim header to learn the payload size.
    std::vector<int64_t> dim_info;
    getDimInfo(fp, &dim_info);
    if (dim_info.empty()) {  // header unreadable — old code would throw via .at()
        fclose(fp);
        return nullptr;
    }
    uint64_t data_size = dim_info.at(dim_info.size() - 1);
    // Second pass: rewind (cheaper than the old close/reopen), skip the header
    // words again, then read the payload that follows them.
    rewind(fp);
    uint32_t *header = reinterpret_cast<uint32_t *>(malloc((dim_info[0] + 1) * sizeof(uint32_t)));
    if (header == nullptr) {
        fclose(fp);
        return nullptr;
    }
    size_t ret = fread(header, sizeof(uint32_t), (dim_info[0] + 1), fp);
    free(header);  // only needed to advance the file position
    if (ret == 0) {
        fclose(fp);
        return nullptr;
    }
    uint32_t *data = reinterpret_cast<uint32_t *>(malloc((data_size) * sizeof(uint32_t)));
    if (data == nullptr) {
        fclose(fp);
        return nullptr;
    }
    size_t ret2 = fread(data, sizeof(uint32_t), data_size, fp);
    if (ret2 == 0) {
        free(data);
        fclose(fp);
        return nullptr;
    }
    fclose(fp);
    // Expose only the shape dims (drop the leading count and trailing size).
    for (int i = 0; i < dim_info[0]; i++) {
        dim_info1->push_back(dim_info[i + 1]);
    }
    printf("\n [common.cpp] Read test data file Over, get dimInfo as: (");
    int count = dim_info1->size();
    for (int i = 0; i < count; i++) {
        printf("%" PRId64, dim_info1->at(i));
    }
    printf(")\n");
    return data;
}
/*
 * allclose: element-wise tolerance comparison of two float buffers, in the
 * spirit of numpy.allclose — a[i] passes when |a[i]-b[i]| <= atol + rtol*|b[i]|.
 * param:
 * a:compared file a
 * b:compared file b
 * count: the count size which will compare
 * rtol: relative tolerance
 * atol: absolute tolerance
 * isquant: when true, a failing comparison is still accepted if the mean
 *          relative error stays below 0.04
 * return:
 * true or false
 * */
bool allclose(float *a, float *b, uint64_t count, float rtol = 1e-05,
float atol = 1e-08, bool isquant = false) {
uint32_t i = 0;
// add fail loop print
uint32_t fail_count = 0;
float tol = 0;
float tol1 = 0;
float tol2 = 0;
bool nan_occur_in_accuray = false;
float sum = 0.0f;
// NOTE(review): these statics accumulate across *every* call in the process,
// so min/max/avg_all report aggregate statistics over all outputs compared so
// far — confirm that is intended and not a leftover.
static float sum_all;
static float maximum = 0;
static float minimum = 0;
static uint64_t c = 0;
if (a == nullptr || b == nullptr) {
return false;
}
for (; i < count; ++i) {
// Normalized error terms feed the summary statistics below.
sum = sum + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
sum_all = sum_all + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
maximum = max(maximum, fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i])));
minimum = min(minimum, fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i])));
if (isnan(a[i]) || isinf(a[i])) {
// NaN/INF in the actual output is always a failure.
fail_count = fail_count + 1;
nan_occur_in_accuray = true;
if (fail_count < 100) {
printf("  i = %2u:   %+f  |  %+f\n", i, a[i], b[i]);
}
} else if (fabs(a[i] - b[i]) > (atol + rtol * fabs(b[i]))) {
tol = tol + fabs(a[i] - b[i]) / (fabs(b[i]) + 1);
tol1 = tol1 + fabs(a[i] - b[i]);
tol2 = tol2 + fabs(a[i] - b[i]) / fabs(b[i]);
fail_count = fail_count + 1;
if (fail_count < 100) {
printf("  i = %2u:   %+f  |  %+f\n", i, a[i], b[i]);
}
}
if (i == count - 1) {
// Summary block printed once, after the last element.
// NOTE(review): when fail_count == 0 the three averages below divide by
// zero (floating-point, so this prints inf/nan rather than crashing).
printf("  ……\n");
printf("\n *** Total fail_count: %u\n", fail_count);
printf("\n fabs(a[i] - b[i])/(fabs(b[i])+1) : %f\n",
tol / fail_count);
printf("\n fabs(a[i] - b[i]) : %f\n", tol1 / fail_count);
printf("\n fabs(a[i] - b[i])/fabs(b[i]) : %f\n", tol2 / fail_count);
c = c + count;
printf("\n avg : %f\n", sum / count);
printf("\n min : %f\n", minimum);
printf("\n max : %f\n", maximum);
printf("\n avg_all : %f\n", sum_all / c);
printf("\n");
// Append a CSV row with the aggregate stats for offline analysis.
fstream file;
file.open("cout.csv", ios::app);
file << ","
<< "1,"
<< "0," << maximum;
if (fail_count == 0) {
file << "," << sum_all / c;
} else {
file << "," << tol / fail_count;
}
file.close();
}
}
if (nan_occur_in_accuray) {
printf("\n[common.cpp] eval output include some NAN/INF\n");
return false;
}
if (fail_count > 0) {
printf("\n *** These data compare failed: atol = %f, rtol = %f\n", atol,
rtol);
printf("\n");
if (isquant) {
// Quantized models tolerate a small mean relative error.
if (tol / fail_count < 0.04) {
return true;
}
}
return false;
}
return true;
}
// uint8 counterpart of allclose(): compares two uint8 buffers element-wise and
// prints/records the same summary statistics.
// NOTE(review): ohos_common.h declares this with int8_t* parameters — that
// declares a different, never-defined overload; the header should be fixed to
// match this uint8_t* definition.
bool allclose_int8(uint8_t *a, uint8_t *b, uint64_t count, float rtol = 1e-05,
float atol = 1e-08, bool isquant = false) {
uint32_t i = 0;
// add fail loop print
uint32_t fail_count = 0;
float tol = 0;
float tol1 = 0;
float tol2 = 0;
bool nan_occur_in_accuray = false;
float sum = 0.0f;
// NOTE(review): statics accumulate across all calls in the process (aggregate
// stats), same caveat as allclose().
static float sum_all;
static float maximum = 0;
static float minimum = 0;
static uint64_t c = 0;
// add fail loop print
if (a == nullptr || b == nullptr) {
return false;
}
for (; i < count; ++i) {
sum = sum + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
sum_all = sum_all + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
maximum = max(static_cast<double>(maximum),
static_cast<double>(fabs(a[i] - b[i])) / (atol + rtol * fabs(b[i])));
minimum = min(static_cast<double>(minimum),
static_cast<double>(fabs(a[i] - b[i])) / (atol + rtol * fabs(b[i])));
// NOTE(review): isnan/isinf on integral uint8 values are always false after
// integer promotion, so this branch is effectively dead.
if (isnan(a[i]) || isinf(a[i])) {
fail_count = fail_count + 1;
nan_occur_in_accuray = true;
if (fail_count < 100) {
printf("  i = %2u:   %+f  |  %+f\n", i, static_cast<float>(a[i]), static_cast<float>(b[i]));
}
} else if (fabs(a[i] - b[i]) > 0) {
// For integers any difference counts as a failure.
tol = tol + fabs(a[i] - b[i]) / (fabs(b[i]) + 1);
tol1 = tol1 + fabs(a[i] - b[i]);
tol2 = tol2 + fabs(a[i] - b[i]) / fabs(b[i]);
fail_count = fail_count + 1;
printf("%2d", static_cast<int>(fabs(a[i] - b[i])));
printf("  i = %2u:   %2d  |  %2d\n", i, a[i], b[i]);
}
if (i == count - 1) {
// Summary block; divides by fail_count which may be 0 (prints inf/nan).
printf("  ……\n");
printf("\n *** Total fail_count: %u\n", fail_count);
printf("\n fabs(a[i] - b[i])/(fabs(b[i])+1) : %f\n",
tol / fail_count);
printf("\n fabs(a[i] - b[i]) : %f\n", tol1 / fail_count);
printf("\n fabs(a[i] - b[i])/fabs(b[i]) : %f\n", tol2 / fail_count);
c = c + count;
printf("\n avg : %f\n", sum / count);
printf("\n min : %f\n", minimum);
printf("\n max : %f\n", maximum);
printf("\n avg_all : %f\n", sum_all / c);
printf("\n");
// Append a CSV row with the aggregate stats for offline analysis.
fstream file;
file.open("cout.csv", ios::app);
file << ","
<< "1,"
<< "0," << maximum;
if (fail_count == 0) {
file << "," << sum_all / c;
} else {
file << "," << tol / fail_count;
}
file.close();
}
}
if (nan_occur_in_accuray) {
printf("\n[common.cpp] eval output include some NAN/INF\n");
return false;
}
if (fail_count > 0) {
printf("\n *** These data compare failed: atol = %f, rtol = %f\n", atol,
rtol);
printf("\n");
if (isquant) {
if (tol / fail_count < 0.04) {
return true;
}
}
return false;
}
return true;
}
/*
 * compFp32WithTData: compare the data with the data in hisi .t file
 * param:
 * actualOutputData: the result of ge
 * expectedDataFile: the path of hisi .t result file
 * rtol: relative tolerance
 * atol: absolute tolerance
 * isquant: forwarded to allclose()
 * return:
 * true of false (false when the expectation file cannot be read)
 * */
bool compFp32WithTData(float *actualOutputData, const std::string& expectedDataFile,
                       float rtol = 1e-05, float atol = 1e-08,
                       bool isquant = false) {
    std::vector<int64_t> dim_info;
    float *expectedOutputData =
        reinterpret_cast<float *>(readTestDataFile(expectedDataFile, &dim_info));
    // Missing/corrupt expectation file: fail instead of dereferencing nullptr
    // (compUint8WithTData already guarded this; this path did not).
    if (expectedOutputData == nullptr) {
        return false;
    }
    // uint64_t init so large tensors don't overflow an int accumulator.
    uint64_t data_size =
        std::accumulate(dim_info.begin(), dim_info.end(), static_cast<uint64_t>(1), std::multiplies<uint64_t>());
    // print caffe/tf output:
    printf("[common.cpp] expected output data:");
    for (uint32_t i = 0; i < data_size && i < 10; i++) {
        printf("%4f ", expectedOutputData[i]);
    }
    printf("\n");
    bool result = allclose(actualOutputData, expectedOutputData, data_size, rtol, atol, isquant);
    free(expectedOutputData);  // readTestDataFile malloc()s the buffer; old code leaked it
    return result;
}
// Compare a uint8 output buffer against a hisi .t expectation file; returns
// false when the expectation file cannot be read. Frees the expectation buffer
// before returning (the old code leaked it).
bool compUint8WithTData(uint8_t *actualOutputData, const std::string& expectedDataFile,
                        float rtol = 1e-05, float atol = 1e-08,
                        bool isquant = false) {
    std::vector<int64_t> dim_info;
    void *dataFile = readTestDataFile(expectedDataFile, &dim_info);
    if (dataFile == nullptr) {
        return false;
    }
    uint8_t *expectedOutputData = reinterpret_cast<uint8_t *>(dataFile);
    // uint64_t init so large tensors don't overflow an int accumulator.
    uint64_t data_size =
        std::accumulate(dim_info.begin(), dim_info.end(), static_cast<uint64_t>(1), std::multiplies<uint64_t>());
    // print caffe/tf output:
    printf("\n [common.cpp] expected output data:\n");
    for (uint32_t i = 0; i < data_size && i < 10; i++) {
        printf("%4hhu ", static_cast<unsigned char>(expectedOutputData[i]));
    }
    printf("\n");
    bool result = allclose_int8(actualOutputData, expectedOutputData, data_size, rtol, atol, isquant);
    free(dataFile);  // readTestDataFile malloc()s the buffer
    return result;
}
/*
 * ReadFile: read file of model
 * param:
 * file: file location
 * size: out — receives the file size in bytes
 * return:
 * buffer allocated with new[] (caller must delete[]), or nullptr on any error
 * */
char *ReadFile(const char *file, size_t* size) {
    printf("[common.cpp] Loading data from: %s\n", file);
    if (file == nullptr || size == nullptr) {
        return nullptr;
    }
    // Open in binary mode: model/input files are binary data and text mode
    // would mangle them on platforms that translate line endings.
    std::ifstream ifs(file, std::ios::in | std::ios::binary);
    if (!ifs.good()) {
        return nullptr;
    }
    ifs.seekg(0, std::ios::end);
    std::streampos end_pos = ifs.tellg();
    if (end_pos < 0) {  // tellg reports -1 on failure
        return nullptr;
    }
    *size = static_cast<size_t>(end_pos);
    // nothrow new: the old `buf == nullptr` check was dead code because plain
    // new throws std::bad_alloc instead of returning nullptr.
    char *buf = new (std::nothrow) char[*size];
    if (buf == nullptr) {
        return nullptr;
    }
    ifs.seekg(0, std::ios::beg);
    ifs.read(buf, *size);
    if (static_cast<size_t>(ifs.gcount()) != *size) {  // short read
        delete[] buf;
        return nullptr;
    }
    printf("[common.cpp]Read Binary Data Over, get tensorSize as: %" PRId64 ".\n", static_cast<int64_t>(*size));
    return buf;  // ifstream closes via RAII
}
// Repack FP32 data from NCHW layout into NHWC layout.
// src/dst are raw byte buffers; every element occupies 4 bytes (float32).
void PackNCHWToNHWCFp32(const char *src, char *dst, int batch, int plane, int channel) {
    constexpr int kElemBytes = 4;  // sizeof a float32 element
    for (int n = 0; n < batch; ++n) {
        const int image_base = n * channel * plane;  // element offset of this image
        for (int c = 0; c < channel; ++c) {
            for (int hw = 0; hw < plane; ++hw) {
                const int dst_elem = image_base + hw * channel + c;  // NHWC position
                const int src_elem = image_base + c * plane + hw;    // NCHW position
                // Copy the element byte by byte.
                for (int b = 0; b < kElemBytes; ++b) {
                    dst[dst_elem * kElemBytes + b] = src[src_elem * kElemBytes + b];
                }
            }
        }
    }
}
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GE_COMMON_H
#define GE_COMMON_H

#include <cstdio>
#include <cstdlib>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <sstream>   // needed for the std::stringstream using-declaration below
#include <string>
#include <vector>
#include <map>
#include <math.h>

using std::string;
using std::cout;
using std::endl;
using std::map;
using std::ios;
using std::fstream;
using std::to_string;
using std::ifstream;
using std::stringstream;
using std::max;
using std::min;
using std::vector;

// Compare a float output buffer against a hisi .t expectation file.
bool compFp32WithTData(float *actualOutputData,
                       const std::string &expectedDataFile, float rtol,
                       float atol, bool isquant);
// Element-wise comparison of two uint8 buffers.
// NOTE: parameter types fixed to uint8_t* to match the definition in
// ohos_common.cpp — the previous int8_t* declaration declared a different,
// never-defined overload.
bool allclose_int8(uint8_t *a, uint8_t *b, uint64_t count, float rtol, float atol,
                   bool isquant);
// Compare a uint8 output buffer against a hisi .t expectation file.
bool compUint8WithTData(uint8_t *actualOutputData,
                        const std::string &expectedDataFile, float rtol,
                        float atol, bool isquant);
//// add for mslite test of int64:
// Parse the dim header of a .t data file into dim_info.
void getDimInfo(FILE *fp, std::vector<int64_t>* dim_info);
// Read a whole file into a new[]'ed buffer (caller must delete[]).
char *ReadFile(const char *file, size_t* size);
// Repack FP32 data from NCHW to NHWC layout (4 bytes per element).
void PackNCHWToNHWCFp32(const char *src, char *dst, int batch, int plane, int channel);
#endif  // GE_COMMON_H
\ No newline at end of file
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/c_api/tensor_c.h"
#include "gtest/gtest.h"
using namespace testing::ext;
// gtest fixture shared by the tensor C API tests; no per-suite or per-test
// state is needed, so every hook is intentionally empty.
class TensorTest: public testing::Test {
 protected:
  static void SetUpTestCase(void) {}
  static void TearDownTestCase(void) {}
  virtual void SetUp() {}
  virtual void TearDown() {}
};
/**
 * @tc.number : TestCase_0001
 * @tc.name   : tensor C API smoke test
 * @tc.desc   : create, query, mutate and clone a tensor via the OH_AI tensor C API
 */
HWTEST(TensorTest, TestCase_0001, Function | MediumTest | Level1) {
  // Create a 2x3 int32 tensor with no backing data.
  constexpr size_t shape_num = 2;
  int64_t shape[shape_num] = {2, 3};
  OH_AI_TensorHandle tensor =
      OH_AI_TensorCreate("name001", OH_AI_DATATYPE_NUMBERTYPE_INT32, shape, shape_num, nullptr, 0);
  EXPECT_TRUE(tensor != nullptr);
  EXPECT_STREQ(OH_AI_TensorGetName(tensor), "name001");
  EXPECT_EQ(OH_AI_TensorGetDataType(tensor), OH_AI_DATATYPE_NUMBERTYPE_INT32);
  size_t ret_shape_num;
  const int64_t *ret_shape = OH_AI_TensorGetShape(tensor, &ret_shape_num);
  EXPECT_EQ(ret_shape_num, shape_num);
  for (size_t i = 0; i < ret_shape_num; i++) {
    EXPECT_EQ(ret_shape[i], shape[i]);
  }
  EXPECT_EQ(OH_AI_TensorGetElementNum(tensor), 6);
  EXPECT_EQ(OH_AI_TensorGetDataSize(tensor), 6 * sizeof(int32_t));
  // No data was supplied, so GetData is null, but GetMutableData allocates.
  EXPECT_EQ(OH_AI_TensorGetData(tensor), nullptr);
  EXPECT_TRUE(OH_AI_TensorGetMutableData(tensor) != nullptr);
  // Rename and retype the tensor; getters must observe both changes.
  OH_AI_TensorSetName(tensor, "name002");
  EXPECT_STREQ(OH_AI_TensorGetName(tensor), "name002");
  OH_AI_TensorSetDataType(tensor, OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
  EXPECT_EQ(OH_AI_TensorGetDataType(tensor), OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
  // Reshape to 1x2x3x1 (same element count) and verify.
  constexpr size_t new_shape_num = 4;
  int64_t new_shape[new_shape_num] = {1, 2, 3, 1};
  OH_AI_TensorSetShape(tensor, new_shape, new_shape_num);
  size_t new_ret_shape_num;
  const int64_t *new_ret_shape = OH_AI_TensorGetShape(tensor, &new_ret_shape_num);
  EXPECT_EQ(new_ret_shape_num, new_shape_num);
  for (size_t i = 0; i < new_ret_shape_num; i++) {
    EXPECT_EQ(new_ret_shape[i], new_shape[i]);
  }
  OH_AI_TensorSetFormat(tensor, OH_AI_FORMAT_NCHW);
  EXPECT_EQ(OH_AI_TensorGetFormat(tensor), OH_AI_FORMAT_NCHW);
  constexpr size_t data_len = 6;
  EXPECT_EQ(OH_AI_TensorGetElementNum(tensor), data_len);
  EXPECT_EQ(OH_AI_TensorGetDataSize(tensor), data_len * sizeof(float));
  // NOTE(review): a stack array is handed to SetData and the tensor destroyed
  // below; this assumes TensorDestroy does not free data set this way, and
  // that the buffer from GetMutableData above is not leaked — confirm the
  // ownership contract of the C API.
  float data[data_len] = {1, 2, 3, 4, 5, 6};
  OH_AI_TensorSetData(tensor, data);
  const float *ret_data = static_cast<const float *>(OH_AI_TensorGetData(tensor));
  for (size_t i = 0; i < data_len; i++) {
    EXPECT_EQ(ret_data[i], data[i]);
  }
  // Clone: new handle, "_duplicate" name suffix, deep-copied data buffer.
  OH_AI_TensorHandle clone = OH_AI_TensorClone(tensor);
  EXPECT_TRUE(clone != nullptr);
  EXPECT_STREQ(OH_AI_TensorGetName(clone), "name002_duplicate");
  EXPECT_EQ(OH_AI_TensorGetDataType(clone), OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
  size_t clone_shape_num;
  const int64_t *clone_shape = OH_AI_TensorGetShape(clone, &clone_shape_num);
  EXPECT_EQ(clone_shape_num, new_ret_shape_num);
  for (size_t i = 0; i < clone_shape_num; i++) {
    EXPECT_EQ(clone_shape[i], new_ret_shape[i]);
  }
  EXPECT_EQ(OH_AI_TensorGetElementNum(clone), OH_AI_TensorGetElementNum(tensor));
  EXPECT_EQ(OH_AI_TensorGetDataSize(clone), OH_AI_TensorGetDataSize(tensor));
  EXPECT_TRUE(OH_AI_TensorGetData(clone) != OH_AI_TensorGetData(tensor));
  OH_AI_TensorDestroy(&tensor);
  OH_AI_TensorDestroy(&clone);
}
...@@ -97,7 +97,6 @@ lite_component("acts_component") { ...@@ -97,7 +97,6 @@ lite_component("acts_component") {
"//test/xts/acts/appexecfwk_lite/appexecfwk_posix:ActsBundleMgrTest", "//test/xts/acts/appexecfwk_lite/appexecfwk_posix:ActsBundleMgrTest",
"//test/xts/acts/ability_lite/ability_posix:ActsAbilityMgrTest", "//test/xts/acts/ability_lite/ability_posix:ActsAbilityMgrTest",
"//test/xts/acts/ai_lite/ai_engine_posix/base:ActsAiEngineTest", "//test/xts/acts/ai_lite/ai_engine_posix/base:ActsAiEngineTest",
"//test/xts/acts/ai/mindspore:ActsMindSporeTest",
] ]
} }
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册