提交 1969b9e0 编写于 作者: T TonyWang222

add mslite xts

Change-Id: I0944729a3f3cebd3981e501aca4642898c896d8b
Signed-off-by: TonyWang222 <wangtongyu6@huawei.com>
上级 82525981
......@@ -14,4 +14,7 @@
# Aggregation group for the AI subsystem's XTS test suites.
group("ai") {
testonly = true
deps = [ "neural_network_runtime:neural_network_runtime" ]
# The MindSpore Lite suite is only built for the standard system profile.
if (is_standard_system) {
deps += ["mindspore:ActsMindSporeTest"]
}
}
# Copyright (c) 2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import("//test/xts/tools/build/suite.gni")
# Output path for the suite artifacts.
# NOTE(review): "hits/" looks like a typo for "acts/" (this is an ACTS
# suite) - confirm against sibling suites before changing.
module_output_path = "hits/ActsMindSporeTest"
# XTS module test suite covering the MindSpore Lite C API (NDK) surface:
# context, model, and tensor tests.
ohos_moduletest_suite("ActsMindSporeTest") {
module_out_path = module_output_path
sources = [
"ndk_test/context_test.cc",
"ndk_test/model_test.cc",
"ndk_test/tensor_test.cc",
]
# Public headers of the MindSpore Lite C API (include/c_api/*.h).
include_dirs = [ "//foundation/ai/mindspore/" ]
deps = [
"//third_party/googletest:gmock",
"//third_party/googletest:gtest",
"//third_party/mindspore/mindspore/lite:mindspore_lib",
]
}
{
"description": "Config for MindSpore test cases",
"driver": {
"module-name": "ActsMindSporeTest",
"native-test-timeout": "120000",
"native-test-device-path": "/data/local/tmp",
"runtime-hint": "1s",
"type": "CppTest"
},
"kits": [
{
"post-push" : [
"chmod -R 777 /data/local/tmp/*"
],
"push": [
"ActsMindSporeTest->/data/local/tmp/ActsMindSporeTest"
],
"type": "PushKit"
},
{
"type": "ShellKit",
"run-command": [
"remount",
"mkdir /data/test"
]
}
]
}
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/c_api/context_c.h"
#include "gtest/gtest.h"
using namespace testing::ext;
class ContextTest: public testing::Test {
protected:
static void SetUpTestCase(void) {}
static void TearDownTestCase(void) {}
virtual void SetUp() {}
virtual void TearDown() {}
};
/**
 * Verifies the OH_AI device-info and context C API:
 * setter/getter round trips for a Kirin-NPU and a CPU device info,
 * context thread options, then handing both device infos to the context.
 */
HWTEST(ContextTest, TestCase_0001, Function | MediumTest | Level1) {
// NPU device info: each setter must be observable through its getter.
OH_AI_DeviceInfoHandle npu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_KIRIN_NPU);
EXPECT_TRUE(npu_device_info != nullptr);
EXPECT_EQ(OH_AI_DeviceInfoGetDeviceType(npu_device_info), OH_AI_DEVICETYPE_KIRIN_NPU);
OH_AI_DeviceInfoSetProvider(npu_device_info, "vendor name");
EXPECT_STREQ(OH_AI_DeviceInfoGetProvider(npu_device_info), "vendor name");
OH_AI_DeviceInfoSetProviderDevice(npu_device_info, "npu_a");
EXPECT_STREQ(OH_AI_DeviceInfoGetProviderDevice(npu_device_info), "npu_a");
OH_AI_DeviceInfoSetFrequency(npu_device_info, 3);
EXPECT_EQ(OH_AI_DeviceInfoGetFrequency(npu_device_info), 3);
// Context thread options round trip.
OH_AI_ContextHandle context = OH_AI_ContextCreate();
EXPECT_TRUE(context != nullptr);
OH_AI_ContextSetThreadNum(context, 4);
EXPECT_EQ(OH_AI_ContextGetThreadNum(context), 4);
OH_AI_ContextSetThreadAffinityMode(context, 2);
EXPECT_EQ(OH_AI_ContextGetThreadAffinityMode(context), 2);
// The affinity core list must be read back element-for-element.
constexpr size_t core_num = 4;
int32_t core_list[core_num] = {1, 3, 2, 0};
OH_AI_ContextSetThreadAffinityCoreList(context, core_list, core_num);
size_t ret_core_num;
const int32_t *ret_core_list = nullptr;
ret_core_list = OH_AI_ContextGetThreadAffinityCoreList(context, &ret_core_num);
EXPECT_EQ(ret_core_num, core_num);
for (size_t i = 0; i < ret_core_num; i++) {
EXPECT_EQ(ret_core_list[i], core_list[i]);
}
OH_AI_ContextSetEnableParallel(context, true);
EXPECT_EQ(OH_AI_ContextGetEnableParallel(context), true);
// CPU device info: destroy-and-recreate also exercises OH_AI_DeviceInfoDestroy.
OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
OH_AI_DeviceInfoDestroy(&cpu_device_info);
cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, true);
EXPECT_EQ(OH_AI_DeviceInfoGetEnableFP16(cpu_device_info), true);
// Both device infos are added to the context; presumably the context
// takes ownership and releases them on destroy - confirm in API docs.
OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
OH_AI_ContextAddDeviceInfo(context, npu_device_info);
OH_AI_ContextDestroy(&context);
}
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <fstream>
#include "include/c_api/model_c.h"
#include "gtest/gtest.h"
using namespace testing::ext;
// Model pushed to the device before the run (Test.json's ShellKit creates
// /data/test; the model itself is assumed staged there - TODO confirm).
const char* MODEL_FILE_PATH = "/data/test/mobilenetv2.ms";
// Profiling accumulators keyed by op type / op name: (call count, total ms).
std::map<std::string, std::pair<int, float>> g_op_times_by_type_;
std::map<std::string, std::pair<int, float>> g_op_times_by_name_;
#ifdef __cplusplus
extern "C" {
#endif
// Timestamp (us) captured by TimeBeforeCallback, consumed by TimeAfterCallback.
uint64_t g_op_begin_ = 0;
// Total number of kernel invocations observed across all callbacks.
int g_op_call_times_total_ = 0;
// Accumulated kernel execution time in milliseconds.
float g_op_cost_total_ = 0.0f;
const int USEC = 1000000;  // microseconds per second
const int MSEC = 1000;     // nanoseconds per microsecond
// Returns a CLOCK_MONOTONIC timestamp in microseconds, or 0 if the clock
// cannot be read.
uint64_t GetTimeUs() {
struct timespec ts = {0, 0};
if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
return 0;
}
// Widen tv_sec BEFORE multiplying: the original computed
// `ts.tv_sec * USEC` in time_t arithmetic, which overflows when
// time_t is 32-bit. tv_nsec / MSEC converts nanoseconds to microseconds.
uint64_t ret_val = static_cast<uint64_t>(ts.tv_sec) * USEC + static_cast<uint64_t>(ts.tv_nsec) / MSEC;
return ret_val;
}
// Profiling hook run before each kernel: ensures the per-type and per-name
// accumulator entries exist, counts the invocation, and records the start
// timestamp for TimeAfterCallback. Always returns true so inference continues.
bool TimeBeforeCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
                        const OH_AI_CallBackParam kernel_Info) {
    // emplace() is a no-op when the key already exists, so each map needs a
    // single lookup instead of the original find()+insert() pair.
    g_op_times_by_type_.emplace(kernel_Info.node_type, std::make_pair(0, 0.0f));
    g_op_times_by_name_.emplace(kernel_Info.node_name, std::make_pair(0, 0.0f));
    g_op_call_times_total_++;
    g_op_begin_ = GetTimeUs();
    return true;
}
// Profiling hook run after each kernel: adds the elapsed time (ms) since
// TimeBeforeCallback to the global, per-type and per-name accumulators.
// Always returns true so inference continues.
bool TimeAfterCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
                       const OH_AI_CallBackParam kernel_Info) {
    const uint64_t op_end = GetTimeUs();
    constexpr float kFloatMSEC = 1000.0f;  // us -> ms
    const float cost = static_cast<float>(op_end - g_op_begin_) / kFloatMSEC;
    g_op_cost_total_ += cost;
    // One map lookup per map instead of two operator[] calls each.
    auto &type_stat = g_op_times_by_type_[kernel_Info.node_type];
    type_stat.first++;
    type_stat.second += cost;
    auto &name_stat = g_op_times_by_name_[kernel_Info.node_name];
    name_stat.first++;
    name_stat.second += cost;
    return true;
}
#ifdef __cplusplus
}
#endif
class ModelTest: public testing::Test {
protected:
static void SetUpTestCase(void) {}
static void TearDownTestCase(void) {}
virtual void SetUp() {}
virtual void TearDown() {}
};
// Fills every input tensor with pseudo-random floats in [0, 0.9].
// Returns OH_AI_STATUS_SUCCESS, or OH_AI_STATUS_LITE_ERROR when a tensor's
// mutable buffer cannot be obtained.
int GenerateInputDataWithRandom(OH_AI_TensorHandleArray inputs) {
    for (size_t i = 0; i < inputs.handle_num; ++i) {
        // static_cast/nullptr instead of the original C-style cast and NULL.
        float *input_data = static_cast<float *>(OH_AI_TensorGetMutableData(inputs.handle_list[i]));
        if (input_data == nullptr) {
            printf("OH_AI_TensorGetMutableData failed.\n");
            return OH_AI_STATUS_LITE_ERROR;
        }
        int64_t num = OH_AI_TensorGetElementNum(inputs.handle_list[i]);
        const int divisor = 10;
        for (int64_t j = 0; j < num; j++) {
            input_data[j] = static_cast<float>(rand() % divisor) / divisor; // 0--0.9f
        }
    }
    return OH_AI_STATUS_SUCCESS;
}
// Reads the whole file at `file` into a heap buffer allocated with new[].
// On success stores the byte count in *size and returns the buffer; the
// caller owns it and must delete[] it. Returns nullptr on any I/O failure.
char *ReadFile(const char *file, size_t *size) {
    std::fstream ifs;
    ifs.open(file, std::ifstream::in | std::ifstream::binary);
    if (!ifs.good() || !ifs.is_open()) {
        return nullptr;
    }
    ifs.seekg(0, std::ios::end);
    std::streampos end = ifs.tellg();
    // The original ignored a tellg() failure, which would have passed
    // (size_t)-1 to new[].
    if (end < 0) {
        return nullptr;
    }
    *size = static_cast<size_t>(end);
    // make_unique throws on allocation failure, so no null check is needed
    // (the original's EXPECT_NE on it could never fire).
    auto buf = std::make_unique<char[]>(*size);
    ifs.seekg(0, std::ios::beg);
    ifs.read(buf.get(), *size);
    // Reject short reads / stream errors instead of returning garbage.
    if (!ifs.good()) {
        return nullptr;
    }
    return buf.release();
}
/**
 * Builds a model from an in-memory buffer, then exercises
 * OH_AI_ModelSetWorkspace and OH_AI_ModelDestroy.
 * EXPECT_* does not abort the test body, so each failure path now returns
 * explicitly instead of falling through onto null handles.
 */
HWTEST(ModelTest, TestCase_0001_build, Function | MediumTest | Level1) {
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    if (context == nullptr) {
        printf("OH_AI_ContextCreate failed.\n");
        EXPECT_NE(context, nullptr);
        return;
    }
    OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    if (cpu_device_info == nullptr) {
        printf("OH_AI_DeviceInfoCreate failed.\n");
        OH_AI_ContextDestroy(&context);
        EXPECT_NE(cpu_device_info, nullptr);
        return;
    }
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    if (model == nullptr) {
        printf("OH_AI_ModelCreate failed.\n");
        OH_AI_ContextDestroy(&context);
        EXPECT_NE(model, nullptr);
        return;
    }
    size_t size = 0;
    auto buff = ReadFile(MODEL_FILE_PATH, &size);
    if (buff == nullptr) {
        // Originally execution continued here and passed nullptr to
        // OH_AI_ModelBuild; bail out after cleaning up instead.
        OH_AI_ContextDestroy(&context);
        OH_AI_ModelDestroy(&model);
        EXPECT_NE(buff, nullptr);
        return;
    }
    int ret = OH_AI_ModelBuild(model, buff, size, OH_AI_MODELTYPE_MINDIR, context);
    // ReadFile transfers ownership of the buffer; the original leaked it.
    delete[] buff;
    EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
    OH_AI_ModelSetWorkspace(model, nullptr, 0);
    OH_AI_ModelDestroy(&model);
}
/**
 * End-to-end inference: build a model from file, resize its inputs, fill
 * them with random data, run predict with the profiling callbacks, and dump
 * the first few elements of every input/output tensor.
 * Fixes: the print loops indexed `data[i]` (the outer tensor index) instead
 * of `data[j]` (the element index); failure paths now `return` because
 * EXPECT_* does not abort the test body.
 */
HWTEST(ModelTest, TestCase_0002_predict, Function | MediumTest | Level1) {
    // Create and init context, add CPU device info
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    if (context == nullptr) {
        printf("OH_AI_ContextCreate failed.\n");
        EXPECT_NE(context, nullptr);
        return;
    }
    const int thread_num = 2;
    OH_AI_ContextSetThreadNum(context, thread_num);
    OH_AI_ContextSetThreadAffinityMode(context, 1);
    OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    if (cpu_device_info == nullptr) {
        printf("OH_AI_DeviceInfoCreate failed.\n");
        OH_AI_ContextDestroy(&context);
        EXPECT_NE(cpu_device_info, nullptr);
        return;
    }
    OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, false);
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
    // Create model
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    if (model == nullptr) {
        printf("OH_AI_ModelCreate failed.\n");
        OH_AI_ContextDestroy(&context);
        EXPECT_NE(model, nullptr);
        return;
    }
    // Build model
    int ret = OH_AI_ModelBuildFromFile(model, MODEL_FILE_PATH, OH_AI_MODELTYPE_MINDIR, context);
    if (ret != OH_AI_STATUS_SUCCESS) {
        printf("OH_AI_ModelBuildFromFile failed, ret: %d.\n", ret);
        OH_AI_ModelDestroy(&model);
        EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
        return;
    }
    // Get Inputs
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    if (inputs.handle_list == nullptr) {
        printf("OH_AI_ModelGetInputs failed, ret: %d.\n", ret);
        OH_AI_ModelDestroy(&model);
        EXPECT_NE(inputs.handle_list, nullptr);
        return;
    }
    // Resize to a fixed NHWC shape before generating data.
    std::vector<OH_AI_ShapeInfo> shape_infos = {{4, {1, 112, 112, 3}}};
    ret = OH_AI_ModelResize(model, inputs, shape_infos.data(), shape_infos.size());
    EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
    // Generate random data as input data.
    ret = GenerateInputDataWithRandom(inputs);
    if (ret != OH_AI_STATUS_SUCCESS) {
        printf("GenerateInputDataWithRandom failed, ret: %d.\n", ret);
        OH_AI_ModelDestroy(&model);
        EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
        return;
    }
    // Model Predict, with the profiling callbacks defined above.
    OH_AI_TensorHandleArray outputs = OH_AI_ModelGetOutputs(model);
    ret = OH_AI_ModelPredict(model, inputs, &outputs, TimeBeforeCallback, TimeAfterCallback);
    if (ret != OH_AI_STATUS_SUCCESS) {
        printf("OH_AI_ModelPredict failed, ret: %d.\n", ret);
        OH_AI_ModelDestroy(&model);
        EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
        return;
    }
    // Print Input and Output Tensor Data.
    for (size_t i = 0; i < inputs.handle_num; ++i) {
        OH_AI_TensorHandle tensor = inputs.handle_list[i];
        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
        printf("Tensor name: %s.\n", OH_AI_TensorGetName(tensor));
        const float *data =
            (const float *)OH_AI_TensorGetData(OH_AI_ModelGetInputByTensorName(model, OH_AI_TensorGetName(tensor)));
        printf("input data is:\n");
        const int max_print_num = 10;
        for (int j = 0; j < element_num && j <= max_print_num; ++j) {
            printf("%f ", data[j]);  // BUGFIX: was data[i]
        }
        printf("\n");
    }
    for (size_t i = 0; i < outputs.handle_num; ++i) {
        OH_AI_TensorHandle tensor = outputs.handle_list[i];
        int64_t element_num = OH_AI_TensorGetElementNum(tensor);
        printf("Tensor name: %s.\n", OH_AI_TensorGetName(tensor));
        const float *data =
            (const float *)OH_AI_TensorGetData(OH_AI_ModelGetOutputByTensorName(model, OH_AI_TensorGetName(tensor)));
        printf("output data is:\n");
        const int max_print_num = 10;
        for (int j = 0; j < element_num && j <= max_print_num; ++j) {
            printf("%f ", data[j]);  // BUGFIX: was data[i]
        }
        printf("\n");
    }
    // Delete model.
    OH_AI_ModelDestroy(&model);
    EXPECT_EQ(model, nullptr);
}
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/c_api/tensor_c.h"
#include "gtest/gtest.h"
using namespace testing::ext;
class TensorTest: public testing::Test {
protected:
static void SetUpTestCase(void) {}
static void TearDownTestCase(void) {}
virtual void SetUp() {}
virtual void TearDown() {}
};
/**
 * @tc.number : TestCase_0001
 * @tc.name   : tensor create/get/set/clone
 * @tc.desc   : Verify the OH_AI_Tensor C API: creation, name/dtype/shape/
 *              format accessors, data set/get, and cloning.
 */
/**
 * Exercises the OH_AI_Tensor C API end to end: create with no backing data,
 * round-trip every accessor, attach external data, then clone and compare.
 */
HWTEST(TensorTest, TestCase_0001, Function | MediumTest | Level1) {
// Create a 2x3 int32 tensor with no data buffer.
constexpr size_t shape_num = 2;
int64_t shape[shape_num] = {2, 3};
OH_AI_TensorHandle tensor =
OH_AI_TensorCreate("name001", OH_AI_DATATYPE_NUMBERTYPE_INT32, shape, shape_num, nullptr, 0);
EXPECT_TRUE(tensor != nullptr);
EXPECT_STREQ(OH_AI_TensorGetName(tensor), "name001");
EXPECT_EQ(OH_AI_TensorGetDataType(tensor), OH_AI_DATATYPE_NUMBERTYPE_INT32);
size_t ret_shape_num;
const int64_t *ret_shape = OH_AI_TensorGetShape(tensor, &ret_shape_num);
EXPECT_EQ(ret_shape_num, shape_num);
for (size_t i = 0; i < ret_shape_num; i++) {
EXPECT_EQ(ret_shape[i], shape[i]);
}
// 2 * 3 = 6 elements; created without data, so GetData is null while
// GetMutableData is expected to allocate a buffer on demand.
EXPECT_EQ(OH_AI_TensorGetElementNum(tensor), 6);
EXPECT_EQ(OH_AI_TensorGetDataSize(tensor), 6 * sizeof(int32_t));
EXPECT_EQ(OH_AI_TensorGetData(tensor), nullptr);
EXPECT_TRUE(OH_AI_TensorGetMutableData(tensor) != nullptr);
// Name / dtype setters must be observable through the getters.
OH_AI_TensorSetName(tensor, "name002");
EXPECT_STREQ(OH_AI_TensorGetName(tensor), "name002");
OH_AI_TensorSetDataType(tensor, OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
EXPECT_EQ(OH_AI_TensorGetDataType(tensor), OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
// Reshape to 1x2x3x1 (same element count) and verify the round trip.
constexpr size_t new_shape_num = 4;
int64_t new_shape[new_shape_num] = {1, 2, 3, 1};
OH_AI_TensorSetShape(tensor, new_shape, new_shape_num);
size_t new_ret_shape_num;
const int64_t *new_ret_shape = OH_AI_TensorGetShape(tensor, &new_ret_shape_num);
EXPECT_EQ(new_ret_shape_num, new_shape_num);
for (size_t i = 0; i < new_ret_shape_num; i++) {
EXPECT_EQ(new_ret_shape[i], new_shape[i]);
}
OH_AI_TensorSetFormat(tensor, OH_AI_FORMAT_NCHW);
EXPECT_EQ(OH_AI_TensorGetFormat(tensor), OH_AI_FORMAT_NCHW);
// Element count is unchanged by the reshape; size now reflects float32.
constexpr size_t data_len = 6;
EXPECT_EQ(OH_AI_TensorGetElementNum(tensor), data_len);
EXPECT_EQ(OH_AI_TensorGetDataSize(tensor), data_len * sizeof(float));
// Attach an external (stack-owned) buffer and read it back.
float data[data_len] = {1, 2, 3, 4, 5, 6};
OH_AI_TensorSetData(tensor, data);
const float *ret_data = static_cast<const float *>(OH_AI_TensorGetData(tensor));
for (size_t i = 0; i < data_len; i++) {
EXPECT_EQ(ret_data[i], data[i]);
}
// Clone: expected to get a "_duplicate" name suffix, identical metadata,
// and a distinct data buffer.
OH_AI_TensorHandle clone = OH_AI_TensorClone(tensor);
EXPECT_TRUE(clone != nullptr);
EXPECT_STREQ(OH_AI_TensorGetName(clone), "name002_duplicate");
EXPECT_EQ(OH_AI_TensorGetDataType(clone), OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
size_t clone_shape_num;
const int64_t *clone_shape = OH_AI_TensorGetShape(clone, &clone_shape_num);
EXPECT_EQ(clone_shape_num, new_ret_shape_num);
for (size_t i = 0; i < clone_shape_num; i++) {
EXPECT_EQ(clone_shape[i], new_ret_shape[i]);
}
EXPECT_EQ(OH_AI_TensorGetElementNum(clone), OH_AI_TensorGetElementNum(tensor));
EXPECT_EQ(OH_AI_TensorGetDataSize(clone), OH_AI_TensorGetDataSize(tensor));
EXPECT_TRUE(OH_AI_TensorGetData(clone) != OH_AI_TensorGetData(tensor));
OH_AI_TensorDestroy(&tensor);
OH_AI_TensorDestroy(&clone);
}
......@@ -97,6 +97,7 @@ lite_component("acts_component") {
"//test/xts/acts/appexecfwk_lite/appexecfwk_posix:ActsBundleMgrTest",
"//test/xts/acts/ability_lite/ability_posix:ActsAbilityMgrTest",
"//test/xts/acts/ai_lite/ai_engine_posix/base:ActsAiEngineTest",
"//test/xts/acts/ai/mindspore:ActsMindSporeTest",
]
}
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册