Commit 83d33c99 authored by TonyWang222

add MindSpore XTS tests

Change-Id: Ia6db5a7f6b37a14331ecc71a1eaa8e2f3565adff
Signed-off-by: TonyWang222 <wangtongyu6@huawei.com>
Parent 1969b9e0
......@@ -15,6 +15,6 @@ group("ai") {
testonly = true
deps = [ "neural_network_runtime:neural_network_runtime" ]
if (is_standard_system) {
deps += ["mindspore:ActsMindSporeTest"]
deps += [ "mindspore:ActsMindSporeTest" ]
}
}
......@@ -13,14 +13,13 @@
import("//test/xts/tools/build/suite.gni")
module_output_path = "hits/ActsMindSporeTest"
module_output_path = "acts/ActsMindSporeTest"
ohos_moduletest_suite("ActsMindSporeTest") {
module_out_path = module_output_path
sources = [
"ndk_test/context_test.cc",
"ndk_test/model_test.cc",
"ndk_test/tensor_test.cc",
"src/ohos_c_api_test_mslite.cpp",
"src/ohos_common.cpp",
]
include_dirs = [ "//foundation/ai/mindspore/" ]
......@@ -30,4 +29,8 @@ ohos_moduletest_suite("ActsMindSporeTest") {
"//third_party/googletest:gtest",
"//third_party/mindspore/mindspore/lite:mindspore_lib",
]
defines = [ "__STDC_FORMAT_MACROS" ]
subsystem_name = "ai"
part_name = "mindspore"
}
......@@ -8,21 +8,37 @@
"type": "CppTest"
},
"kits": [
{
"post-push" : [
"chmod -R 777 /data/local/tmp/*"
],
"push": [
"ActsMindSporeTest->/data/local/tmp/ActsMindSporeTest"
],
"type": "PushKit"
},
{
"type": "ShellKit",
"run-command": [
"remount",
"mkdir /data/test"
]
},
{
"type": "PushKit",
"push": [
"ActsMindSporeTest->/data/local/tmp/ActsMindSporeTest",
"resources/ai/mindspore/aiy_vision_classifier_plants_V1_3/aiy_vision_classifier_plants_V1_3.ms -> /data/test",
"resources/ai/mindspore/aiy_vision_classifier_plants_V1_3/aiy_vision_classifier_plants_V1_3.input -> /data/test",
"resources/ai/mindspore/aiy_vision_classifier_plants_V1_3/aiy_vision_classifier_plants_V1_30.output -> /data/test",
"resources/ai/mindspore/ml_face_is_face/ml_face_isface.ms -> /data/test",
"resources/ai/mindspore/ml_face_is_face/ml_face_isfacer13.ms -> /data/test",
"resources/ai/mindspore/ml_face_is_face/ml_face_isface.input -> /data/test",
"resources/ai/mindspore/ml_face_is_face/ml_face_isface_0.input -> /data/test",
"resources/ai/mindspore/ml_face_is_face/ml_face_isface0.output -> /data/test",
"resources/ai/mindspore/ml_Hand_deploy/ml_Hand_deploy.ms -> /data/test",
"resources/ai/mindspore/ml_Hand_deploy/ml_Hand_deploy_0.input -> /data/test",
"resources/ai/mindspore/ml_Hand_deploy/ml_Hand_deploy0.output -> /data/test",
"resources/ai/mindspore/ml_ocr_cn/ml_ocr_cn_0.input -> /data/test",
"resources/ai/mindspore/ml_ocr_cn/ml_ocr_cn.ms -> /data/test",
"resources/ai/mindspore/ml_ocr_cn/ml_ocr_cn0.output -> /data/test",
"resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite.ms -> /data/test",
"resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite_0.input -> /data/test",
"resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite_1.input -> /data/test",
"resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite_2.input -> /data/test",
"resources/ai/mindspore/ml_headpose_pb2tflite/ml_headpose_pb2tflite0.output -> /data/test"
]
}
]
}
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/c_api/context_c.h"
#include "gtest/gtest.h"
using namespace testing::ext;
class ContextTest: public testing::Test {
protected:
static void SetUpTestCase(void) {}
static void TearDownTestCase(void) {}
virtual void SetUp() {}
virtual void TearDown() {}
};
HWTEST(ContextTest, TestCase_0001, Function | MediumTest | Level1) {
OH_AI_DeviceInfoHandle npu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_KIRIN_NPU);
EXPECT_TRUE(npu_device_info != nullptr);
EXPECT_EQ(OH_AI_DeviceInfoGetDeviceType(npu_device_info), OH_AI_DEVICETYPE_KIRIN_NPU);
OH_AI_DeviceInfoSetProvider(npu_device_info, "vendor name");
EXPECT_STREQ(OH_AI_DeviceInfoGetProvider(npu_device_info), "vendor name");
OH_AI_DeviceInfoSetProviderDevice(npu_device_info, "npu_a");
EXPECT_STREQ(OH_AI_DeviceInfoGetProviderDevice(npu_device_info), "npu_a");
OH_AI_DeviceInfoSetFrequency(npu_device_info, 3);
EXPECT_EQ(OH_AI_DeviceInfoGetFrequency(npu_device_info), 3);
OH_AI_ContextHandle context = OH_AI_ContextCreate();
EXPECT_TRUE(context != nullptr);
OH_AI_ContextSetThreadNum(context, 4);
EXPECT_EQ(OH_AI_ContextGetThreadNum(context), 4);
OH_AI_ContextSetThreadAffinityMode(context, 2);
EXPECT_EQ(OH_AI_ContextGetThreadAffinityMode(context), 2);
constexpr size_t core_num = 4;
int32_t core_list[core_num] = {1, 3, 2, 0};
OH_AI_ContextSetThreadAffinityCoreList(context, core_list, core_num);
size_t ret_core_num;
const int32_t *ret_core_list = nullptr;
ret_core_list = OH_AI_ContextGetThreadAffinityCoreList(context, &ret_core_num);
EXPECT_EQ(ret_core_num, core_num);
for (size_t i = 0; i < ret_core_num; i++) {
EXPECT_EQ(ret_core_list[i], core_list[i]);
}
OH_AI_ContextSetEnableParallel(context, true);
EXPECT_EQ(OH_AI_ContextGetEnableParallel(context), true);
OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
OH_AI_DeviceInfoDestroy(&cpu_device_info);
cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, true);
EXPECT_EQ(OH_AI_DeviceInfoGetEnableFP16(cpu_device_info), true);
OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
OH_AI_ContextAddDeviceInfo(context, npu_device_info);
OH_AI_ContextDestroy(&context);
}
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "include/c_api/model_c.h"
#include "gtest/gtest.h"
using namespace testing::ext;
const char* MODEL_FILE_PATH = "/data/test/mobilenetv2.ms";
std::map<std::string, std::pair<int, float>> g_op_times_by_type_;
std::map<std::string, std::pair<int, float>> g_op_times_by_name_;
#ifdef __cplusplus
extern "C" {
#endif
uint64_t g_op_begin_ = 0;
int g_op_call_times_total_ = 0;
float g_op_cost_total_ = 0.0f;
const int USEC = 1000000;  // microseconds per second
const int MSEC = 1000;     // nanoseconds per microsecond (divisor below)
uint64_t GetTimeUs() {
struct timespec ts = {0, 0};
if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
return 0;
}
auto ret_val = static_cast<uint64_t>((ts.tv_sec * USEC) + (ts.tv_nsec / MSEC));
return ret_val;
}
bool TimeBeforeCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
const OH_AI_CallBackParam kernel_Info) {
if (g_op_times_by_type_.find(kernel_Info.node_type) == g_op_times_by_type_.end()) {
g_op_times_by_type_.insert(std::make_pair(kernel_Info.node_type, std::make_pair(0, 0.0f)));
}
if (g_op_times_by_name_.find(kernel_Info.node_name) == g_op_times_by_name_.end()) {
g_op_times_by_name_.insert(std::make_pair(kernel_Info.node_name, std::make_pair(0, 0.0f)));
}
g_op_call_times_total_++;
g_op_begin_ = GetTimeUs();
return true;
}
bool TimeAfterCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
const OH_AI_CallBackParam kernel_Info) {
uint64_t opEnd = GetTimeUs();
constexpr float kFloatMSEC = 1000.0f;
float cost = static_cast<float>(opEnd - g_op_begin_) / kFloatMSEC;
g_op_cost_total_ += cost;
g_op_times_by_type_[kernel_Info.node_type].first++;
g_op_times_by_type_[kernel_Info.node_type].second += cost;
g_op_times_by_name_[kernel_Info.node_name].first++;
g_op_times_by_name_[kernel_Info.node_name].second += cost;
return true;
}
#ifdef __cplusplus
}
#endif
class ModelTest: public testing::Test {
protected:
static void SetUpTestCase(void) {}
static void TearDownTestCase(void) {}
virtual void SetUp() {}
virtual void TearDown() {}
};
int GenerateInputDataWithRandom(OH_AI_TensorHandleArray inputs) {
for (size_t i = 0; i < inputs.handle_num; ++i) {
float *input_data = (float *)OH_AI_TensorGetMutableData(inputs.handle_list[i]);
if (input_data == NULL) {
printf("OH_AI_TensorGetMutableData failed.\n");
return OH_AI_STATUS_LITE_ERROR;
}
int64_t num = OH_AI_TensorGetElementNum(inputs.handle_list[i]);
const int divisor = 10;
for (int64_t j = 0; j < num; j++) {
input_data[j] = (float)(rand() % divisor) / divisor; // 0--0.9f
}
}
return OH_AI_STATUS_SUCCESS;
}
// Marked static to avoid a duplicate-symbol clash with the ReadFile defined in ohos_common.cpp.
static char *ReadFile(const char *file, size_t *size) {
std::fstream ifs;
ifs.open(file, std::ifstream::in | std::ifstream::binary);
if (!ifs.good() || !ifs.is_open()) {
return nullptr;
}
ifs.seekg(0, std::ios::end);
*size = ifs.tellg();
auto buf = std::make_unique<char[]>(*size);
EXPECT_NE(buf, nullptr);
ifs.seekg(0, std::ios::beg);
ifs.read(buf.get(), *size);
return buf.release();
}
HWTEST(ModelTest, TestCase_0001_build, Function | MediumTest | Level1) {
OH_AI_ContextHandle context = OH_AI_ContextCreate();
if (context == nullptr) {
printf("OH_AI_ContextCreate failed.\n");
EXPECT_NE(context, nullptr);
}
OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
if (cpu_device_info == nullptr) {
printf("OH_AI_DeviceInfoCreate failed.\n");
OH_AI_ContextDestroy(&context);
EXPECT_NE(cpu_device_info, nullptr);
}
OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
OH_AI_ModelHandle model = OH_AI_ModelCreate();
if (model == nullptr) {
printf("OH_AI_ModelCreate failed.\n");
OH_AI_ContextDestroy(&context);
EXPECT_NE(model, nullptr);
}
size_t size = 0;
auto buff = ReadFile(MODEL_FILE_PATH, &size);
EXPECT_NE(buff, nullptr);
int ret = OH_AI_ModelBuild(model, buff, size, OH_AI_MODELTYPE_MINDIR, context);
EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
OH_AI_ModelSetWorkspace(model, nullptr, 0);
OH_AI_ModelDestroy(&model);
}
HWTEST(ModelTest, TestCase_0002_predict, Function | MediumTest | Level1) {
// Create and init context, add CPU device info
OH_AI_ContextHandle context = OH_AI_ContextCreate();
if (context == nullptr) {
printf("OH_AI_ContextCreate failed.\n");
EXPECT_NE(context, nullptr);
}
const int thread_num = 2;
OH_AI_ContextSetThreadNum(context, thread_num);
OH_AI_ContextSetThreadAffinityMode(context, 1);
OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
if (cpu_device_info == nullptr) {
printf("OH_AI_DeviceInfoCreate failed.\n");
OH_AI_ContextDestroy(&context);
EXPECT_NE(cpu_device_info, nullptr);
}
OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, false);
OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
// Create model
OH_AI_ModelHandle model = OH_AI_ModelCreate();
if (model == nullptr) {
printf("OH_AI_ModelCreate failed.\n");
OH_AI_ContextDestroy(&context);
EXPECT_NE(model, nullptr);
}
// Build model
int ret = OH_AI_ModelBuildFromFile(model, MODEL_FILE_PATH, OH_AI_MODELTYPE_MINDIR, context);
if (ret != OH_AI_STATUS_SUCCESS) {
printf("OH_AI_ModelBuildFromFile failed, ret: %d.\n", ret);
OH_AI_ModelDestroy(&model);
EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
}
// Get Inputs
OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
if (inputs.handle_list == nullptr) {
printf("OH_AI_ModelGetInputs failed, ret: %d.\n", ret);
OH_AI_ModelDestroy(&model);
EXPECT_NE(inputs.handle_list, nullptr);
}
std::vector<OH_AI_ShapeInfo> shape_infos = {{4, {1, 112, 112, 3}}};
ret = OH_AI_ModelResize(model, inputs, shape_infos.data(), shape_infos.size());
EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
// Generate random data as input data.
ret = GenerateInputDataWithRandom(inputs);
if (ret != OH_AI_STATUS_SUCCESS) {
printf("GenerateInputDataWithRandom failed, ret: %d.\n", ret);
OH_AI_ModelDestroy(&model);
EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
}
// Model Predict
OH_AI_TensorHandleArray outputs = OH_AI_ModelGetOutputs(model);
ret = OH_AI_ModelPredict(model, inputs, &outputs, TimeBeforeCallback, TimeAfterCallback);
if (ret != OH_AI_STATUS_SUCCESS) {
printf("OH_AI_ModelPredict failed, ret: %d.\n", ret);
OH_AI_ModelDestroy(&model);
EXPECT_EQ(ret, OH_AI_STATUS_SUCCESS);
}
// Print Input and Output Tensor Data.
for (size_t i = 0; i < inputs.handle_num; ++i) {
OH_AI_TensorHandle tensor = inputs.handle_list[i];
int64_t element_num = OH_AI_TensorGetElementNum(tensor);
printf("Tensor name: %s.\n", OH_AI_TensorGetName(tensor));
const float *data =
(const float *)OH_AI_TensorGetData(OH_AI_ModelGetInputByTensorName(model, OH_AI_TensorGetName(tensor)));
printf("input data is:\n");
const int max_print_num = 10;
for (int j = 0; j < element_num && j < max_print_num; ++j) {
printf("%f ", data[j]);  // index with j (the loop variable), not i
}
printf("\n");
}
for (size_t i = 0; i < outputs.handle_num; ++i) {
OH_AI_TensorHandle tensor = outputs.handle_list[i];
int64_t element_num = OH_AI_TensorGetElementNum(tensor);
printf("Tensor name: %s.\n", OH_AI_TensorGetName(tensor));
const float *data =
(const float *)OH_AI_TensorGetData(OH_AI_ModelGetOutputByTensorName(model, OH_AI_TensorGetName(tensor)));
printf("output data is:\n");
const int max_print_num = 10;
for (int j = 0; j < element_num && j < max_print_num; ++j) {
printf("%f ", data[j]);  // index with j (the loop variable), not i
}
printf("\n");
}
// Delete model.
OH_AI_ModelDestroy(&model);
EXPECT_EQ(model, nullptr);
}
This diff is collapsed.
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ohos_common.h"
#include <numeric>
#include <inttypes.h>
/*
 * getDimInfo: read the dim header from a test data file (uint32_t fields)
 * param:
 * fp: the test data file object
 *
 * return (via dim_info):
 * array holding the dim header of the data file, e.g.
 * [4,3,3,6,3,162(=3*3*6*3)]: 4 is the dim count, 3,3,6,3 is the shape, and
 * the last entry is the total element count of the data that follows.
 * */
void getDimInfo(FILE *fp, std::vector<int64_t>* dim_info) {
const int MAX_HEAD_SIZE = 50;
uint32_t *dim_buffer = reinterpret_cast<uint32_t *>(malloc(MAX_HEAD_SIZE * sizeof(uint32_t)));
size_t ret = fread(dim_buffer, sizeof(uint32_t), MAX_HEAD_SIZE, fp);
if (ret == 0) {
free(dim_buffer);
return;
}
dim_info->push_back(*dim_buffer); // get dim size
// get data shape to compute the datasize
uint64_t data_size = 1;
uint32_t i = 1;
for (; i <= dim_info->at(0); i++) {
dim_info->push_back(*(dim_buffer + i));
data_size *= *(dim_buffer + i);
}
dim_info->push_back(data_size);
free(dim_buffer);
}
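// Illustrative sketch (not part of the original suite): how a .t header with
// the layout parsed above could be produced. writeDimHeader is a hypothetical
// helper; the assumed format is "dim count, then each dim, as uint32_t".
static void writeDimHeader(FILE *fp, const std::vector<uint32_t> &dims) {
    uint32_t dim_count = static_cast<uint32_t>(dims.size());
    fwrite(&dim_count, sizeof(uint32_t), 1, fp);             // e.g. 4
    fwrite(dims.data(), sizeof(uint32_t), dims.size(), fp);  // e.g. 3, 3, 6, 3
}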
/*
 * readTestDataFile: read test data from a hisi .t data file
 * param:
 * infile: the path of the hisi .t data file
 * return:
 * pointer to the raw data (caller frees); dim_info1 receives the shape of
 * the data, e.g. [3,3,6,3] for a 4-dim tensor
 * */
void *readTestDataFile(std::string infile, std::vector<int64_t>* dim_info1) {
printf("\n [common.cpp] Loading data from: %s\n", infile.c_str());
FILE *fp;
fp = fopen(infile.c_str(), "r");
if (fp == nullptr) {
printf("ERROR: cant't open file %s\n", infile.c_str());
return nullptr;
} else {
std::vector<int64_t> dim_info;
std::vector<int64_t>* ptr_dim_info = &dim_info;
getDimInfo(fp, ptr_dim_info);
uint64_t data_size = ptr_dim_info->at(ptr_dim_info->size() - 1);
fclose(fp);
fp = fopen(infile.c_str(), "r");
if (fp == nullptr) {
printf("ERROR: cant't open file %s\n", infile.c_str());
return nullptr;
}
uint32_t *memory = reinterpret_cast<uint32_t *>(malloc((dim_info[0] + 1) * sizeof(uint32_t)));
size_t ret = fread(memory, sizeof(uint32_t), (dim_info[0] + 1), fp);
if (ret == 0) {
free(memory);
fclose(fp);
return nullptr;
}
uint32_t *data = reinterpret_cast<uint32_t *>(malloc((data_size) * sizeof(uint32_t)));
size_t ret2 = fread(data, sizeof(uint32_t), data_size, fp);
if (ret2 == 0) {
free(data);
fclose(fp);
return nullptr;
}
free(memory);
fclose(fp);
for (int i = 0; i < dim_info[0]; i++) {
dim_info1->push_back(dim_info[i + 1]);
}
printf("\n [common.cpp] Read test data file Over, get dimInfo as: (");
int count = dim_info1->size();
for (int i = 0; i < count; i++) {
printf("%" PRId64, dim_info1->at(i));
}
printf(")\n");
return data;
}
}
/*
 * allclose
 * param:
 * a: actual data buffer
 * b: expected (reference) data buffer
 * count: number of elements to compare
 * rtol: relative tolerance
 * atol: absolute tolerance
 * return:
 * true or false; an element passes when |a[i] - b[i]| <= atol + rtol * |b[i]|
 * */
bool allclose(float *a, float *b, uint64_t count, float rtol = 1e-05,
float atol = 1e-08, bool isquant = false) {
uint32_t i = 0;
// add fail loop print
uint32_t fail_count = 0;
float tol = 0;
float tol1 = 0;
float tol2 = 0;
bool nan_occur_in_accuray = false;
float sum = 0.0f;
static float sum_all;
static float maximum = 0;
static float minimum = 0;
static uint64_t c = 0;
if (a == nullptr || b == nullptr) {
return false;
}
for (; i < count; ++i) {
sum = sum + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
sum_all = sum_all + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
maximum = max(maximum, fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i])));
minimum = min(minimum, fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i])));
if (isnan(a[i]) || isinf(a[i])) {
fail_count = fail_count + 1;
nan_occur_in_accuray = true;
if (fail_count < 100) {
printf(" i = %2u: %+f | %+f\n", i, a[i], b[i]);
}
} else if (fabs(a[i] - b[i]) > (atol + rtol * fabs(b[i]))) {
tol = tol + fabs(a[i] - b[i]) / (fabs(b[i]) + 1);
tol1 = tol1 + fabs(a[i] - b[i]);
tol2 = tol2 + fabs(a[i] - b[i]) / fabs(b[i]);
fail_count = fail_count + 1;
if (fail_count < 100) {
printf(" i = %2u: %+f | %+f\n", i, a[i], b[i]);
}
}
if (i == count - 1) {
printf(" ……\n");
printf("\n *** Total fail_count: %u\n", fail_count);
printf("\n fabs(a[i] - b[i])/(fabs(b[i])+1) : %f\n",
tol / fail_count);
printf("\n fabs(a[i] - b[i]) : %f\n", tol1 / fail_count);
printf("\n fabs(a[i] - b[i])/fabs(b[i]) : %f\n", tol2 / fail_count);
c = c + count;
printf("\n avg : %f\n", sum / count);
printf("\n min : %f\n", minimum);
printf("\n max : %f\n", maximum);
printf("\n avg_all : %f\n", sum_all / c);
printf("\n");
fstream file;
file.open("cout.csv", ios::app);
file << ","
<< "1,"
<< "0," << maximum;
if (fail_count == 0) {
file << "," << sum_all / c;
} else {
file << "," << tol / fail_count;
}
file.close();
}
}
if (nan_occur_in_accuray) {
printf("\n[common.cpp] eval output include some NAN/INF\n");
return false;
}
if (fail_count > 0) {
printf("\n *** These data compare failed: atol = %f, rtol = %f\n", atol,
rtol);
printf("\n");
if (isquant) {
if (tol / fail_count < 0.04) {
return true;
}
}
return false;
}
return true;
}
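// Usage sketch (illustrative data, not part of the original suite): allclose
// passes only when every element satisfies |a[i] - b[i]| <= atol + rtol * |b[i]|.
static bool exampleAllclose() {
    float expected[3] = {1.0f, 2.0f, 3.0f};
    float actual[3] = {1.000005f, 2.0f, 2.99998f};
    return allclose(actual, expected, 3);  // defaults: rtol = 1e-05, atol = 1e-08
}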
bool allclose_int8(uint8_t *a, uint8_t *b, uint64_t count, float rtol = 1e-05,
float atol = 1e-08, bool isquant = false) {
uint32_t i = 0;
// add fail loop print
uint32_t fail_count = 0;
float tol = 0;
float tol1 = 0;
float tol2 = 0;
bool nan_occur_in_accuray = false;
float sum = 0.0f;
static float sum_all;
static float maximum = 0;
static float minimum = 0;
static uint64_t c = 0;
// add fail loop print
if (a == nullptr || b == nullptr) {
return false;
}
for (; i < count; ++i) {
sum = sum + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
sum_all = sum_all + fabs(a[i] - b[i]) / (atol + rtol * fabs(b[i]));
maximum = max(static_cast<double>(maximum),
static_cast<double>(fabs(a[i] - b[i])) / (atol + rtol * fabs(b[i])));
minimum = min(static_cast<double>(minimum),
static_cast<double>(fabs(a[i] - b[i])) / (atol + rtol * fabs(b[i])));
if (isnan(a[i]) || isinf(a[i])) {
fail_count = fail_count + 1;
nan_occur_in_accuray = true;
if (fail_count < 100) {
printf(" i = %2u: %+f | %+f\n", i, static_cast<float>(a[i]), static_cast<float>(b[i]));
}
} else if (fabs(a[i] - b[i]) > 0) {
tol = tol + fabs(a[i] - b[i]) / (fabs(b[i]) + 1);
tol1 = tol1 + fabs(a[i] - b[i]);
tol2 = tol2 + fabs(a[i] - b[i]) / fabs(b[i]);
fail_count = fail_count + 1;
printf("%2d", static_cast<int>(fabs(a[i] - b[i])));
printf(" i = %2u: %2d | %2d\n", i, a[i], b[i]);
}
if (i == count - 1) {
printf(" ……\n");
printf("\n *** Total fail_count: %u\n", fail_count);
printf("\n fabs(a[i] - b[i])/(fabs(b[i])+1) : %f\n",
tol / fail_count);
printf("\n fabs(a[i] - b[i]) : %f\n", tol1 / fail_count);
printf("\n fabs(a[i] - b[i])/fabs(b[i]) : %f\n", tol2 / fail_count);
c = c + count;
printf("\n avg : %f\n", sum / count);
printf("\n min : %f\n", minimum);
printf("\n max : %f\n", maximum);
printf("\n avg_all : %f\n", sum_all / c);
printf("\n");
fstream file;
file.open("cout.csv", ios::app);
file << ","
<< "1,"
<< "0," << maximum;
if (fail_count == 0) {
file << "," << sum_all / c;
} else {
file << "," << tol / fail_count;
}
file.close();
}
}
if (nan_occur_in_accuray) {
printf("\n[common.cpp] eval output include some NAN/INF\n");
return false;
}
if (fail_count > 0) {
printf("\n *** These data compare failed: atol = %f, rtol = %f\n", atol,
rtol);
printf("\n");
if (isquant) {
if (tol / fail_count < 0.04) {
return true;
}
}
return false;
}
return true;
}
/*
 * compFp32WithTData: compare float data with the reference data in a hisi .t file
 * param:
 * actualOutputData: the actual inference output
 * expectedDataFile: the path of the hisi .t reference file
 * rtol: relative tolerance
 * atol: absolute tolerance
 * return:
 * true or false
 * */
bool compFp32WithTData(float *actualOutputData, const std::string& expectedDataFile,
float rtol = 1e-05, float atol = 1e-08,
bool isquant = false) {
std::vector<int64_t> dim_info;
std::vector<int64_t>* ptr_dim_info = &dim_info;
float *expectedOutputData =
reinterpret_cast<float *>(readTestDataFile(expectedDataFile, ptr_dim_info));
if (expectedOutputData == nullptr) {
return false;
}
uint32_t i = 0;
// Seed accumulate with uint64_t so the product is not truncated to int.
uint64_t data_size = accumulate(dim_info.begin(), dim_info.end(), uint64_t{1}, std::multiplies<uint64_t>());
// print caffe/tf output:
printf("[common.cpp] expected output data:");
for (; i < data_size && i < 10; i++) {
printf("%4f ", expectedOutputData[i]);
}
printf("\n");
if (isquant) {
return allclose(actualOutputData, expectedOutputData, data_size, rtol, atol,
true);
}
return allclose(actualOutputData, expectedOutputData, data_size, rtol, atol);
}
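// Usage sketch (not part of the original suite): compare a model's float
// output against one of the reference files pushed by the test config above,
// using the default tolerances. The output buffer is assumed to come from
// OH_AI_TensorGetData on the corresponding output tensor.
static bool exampleCompareOutput(float *output_buffer) {
    return compFp32WithTData(output_buffer, "/data/test/ml_face_isface0.output",
                             1e-05, 1e-08, false);
}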
bool compUint8WithTData(uint8_t *actualOutputData, const std::string& expectedDataFile,
float rtol = 1e-05, float atol = 1e-08,
bool isquant = false) {
std::vector<int64_t> dim_info;
std::vector<int64_t>* ptr_dim_info = &dim_info;
auto dataFile = readTestDataFile(expectedDataFile, ptr_dim_info);
if (dataFile == nullptr) {
return false;
}
uint8_t *expectedOutputData =
reinterpret_cast<uint8_t *>(dataFile);
uint32_t i = 0;
// Seed accumulate with uint64_t so the product is not truncated to int.
uint64_t data_size = accumulate(dim_info.begin(), dim_info.end(), uint64_t{1}, std::multiplies<uint64_t>());
// print caffe/tf output:
printf("\n [common.cpp] expected output data:\n");
for (; i < data_size && i < 10; i++) {
printf("%4hhu ", static_cast<unsigned char>(expectedOutputData[i]));
}
printf("\n");
if (isquant) {
return allclose_int8(actualOutputData, expectedOutputData, data_size, rtol,
atol, true);
}
return allclose_int8(actualOutputData, expectedOutputData, data_size, rtol,
atol);
}
/*
 * ReadFile: read a model file into memory
 * param:
 * file: file path
 * size: out parameter, receives the file size in bytes
 * return:
 * buffer holding the file contents (caller releases it with delete[])
 * */
char *ReadFile(const char *file, size_t* size) {
printf("[common.cpp] Loading data from: %s\n", file);
std::ifstream ifs(file);
if (!ifs.good()) {
return nullptr;
}
if (!ifs.is_open()) {
ifs.close();
return nullptr;
}
ifs.seekg(0, std::ios::end);
*size = ifs.tellg();
char *buf = new char[*size];
if (buf == nullptr) {
ifs.close();
return nullptr;
}
ifs.seekg(0, std::ios::beg);
ifs.read(buf, *size);
ifs.close();
printf("[common.cpp]Read Binary Data Over, get tensorSize as: %" PRId64 ".\n", static_cast<int64_t>(*size));
return buf;
}
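// Usage sketch (not part of the original suite): load one of the model files
// pushed by the test config above. The caller owns the returned buffer and
// must release it with delete[].
static char *exampleLoadModel(size_t *size) {
    return ReadFile("/data/test/ml_ocr_cn.ms", size);
}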
// Repack a float32 (4-byte) tensor from NCHW to NHWC layout; the buffers are
// addressed as raw bytes, copying 4 bytes per element.
void PackNCHWToNHWCFp32(const char *src, char *dst, int batch, int plane, int channel) {
for (int n = 0; n < batch; n++) {
for (int c = 0; c < channel; c++) {
for (int hw = 0; hw < plane; hw++) {
int nhwc_index = n * channel * plane + hw * channel + c;
int nchw_index = n * channel * plane + c * plane + hw;
// Copy one 4-byte element.
dst[nhwc_index * 4] = src[nchw_index * 4];
dst[nhwc_index * 4 + 1] = src[nchw_index * 4 + 1];
dst[nhwc_index * 4 + 2] = src[nchw_index * 4 + 2];
dst[nhwc_index * 4 + 3] = src[nchw_index * 4 + 3];
}
}
}
}
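// Usage sketch (illustrative shapes, not part of the original suite): repack a
// 1x3x2x2 NCHW float buffer into NHWC; plane is H * W and each element is 4 bytes.
static void examplePackUsage() {
    float nchw[12] = {0};
    float nhwc[12] = {0};
    PackNCHWToNHWCFp32(reinterpret_cast<const char *>(nchw),
                       reinterpret_cast<char *>(nhwc),
                       /*batch=*/1, /*plane=*/4, /*channel=*/3);
}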
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GE_COMMON_H
#define GE_COMMON_H
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include <map>
#include <sstream>
#include <math.h>
using std::string;
using std::cout;
using std::endl;
using std::map;
using std::ios;
using std::fstream;
using std::to_string;
using std::ifstream;
using std::stringstream;
using std::max;
using std::min;
using std::vector;
// using namespace mindspore::tensor;
bool compFp32WithTData(float *actualOutputData,
const std::string &expectedDataFile, float rtol,
float atol, bool isquant);
// Declaration matches the uint8_t definition in ohos_common.cpp.
bool allclose_int8(uint8_t *a, uint8_t *b, uint64_t count, float rtol, float atol,
bool isquant);
bool compUint8WithTData(uint8_t *actualOutputData,
const std::string &expectedDataFile, float rtol,
float atol, bool isquant);
//// add for mslite test of int64:
void getDimInfo(FILE *fp, std::vector<int64_t>* dim_info);
char *ReadFile(const char *file, size_t* size);
void PackNCHWToNHWCFp32(const char *src, char *dst, int batch, int plane, int channel);
#endif // GE_COMMON_H
\ No newline at end of file
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/c_api/tensor_c.h"
#include "gtest/gtest.h"
using namespace testing::ext;
class TensorTest: public testing::Test {
protected:
static void SetUpTestCase(void) {}
static void TearDownTestCase(void) {}
virtual void SetUp() {}
virtual void TearDown() {}
};
/**
* @tc.number :
* @tc.name :
* @tc.desc :
*/
HWTEST(TensorTest, TestCase_0001, Function | MediumTest | Level1) {
constexpr size_t shape_num = 2;
int64_t shape[shape_num] = {2, 3};
OH_AI_TensorHandle tensor =
OH_AI_TensorCreate("name001", OH_AI_DATATYPE_NUMBERTYPE_INT32, shape, shape_num, nullptr, 0);
EXPECT_TRUE(tensor != nullptr);
EXPECT_STREQ(OH_AI_TensorGetName(tensor), "name001");
EXPECT_EQ(OH_AI_TensorGetDataType(tensor), OH_AI_DATATYPE_NUMBERTYPE_INT32);
size_t ret_shape_num;
const int64_t *ret_shape = OH_AI_TensorGetShape(tensor, &ret_shape_num);
EXPECT_EQ(ret_shape_num, shape_num);
for (size_t i = 0; i < ret_shape_num; i++) {
EXPECT_EQ(ret_shape[i], shape[i]);
}
EXPECT_EQ(OH_AI_TensorGetElementNum(tensor), 6);
EXPECT_EQ(OH_AI_TensorGetDataSize(tensor), 6 * sizeof(int32_t));
EXPECT_EQ(OH_AI_TensorGetData(tensor), nullptr);
EXPECT_TRUE(OH_AI_TensorGetMutableData(tensor) != nullptr);
OH_AI_TensorSetName(tensor, "name002");
EXPECT_STREQ(OH_AI_TensorGetName(tensor), "name002");
OH_AI_TensorSetDataType(tensor, OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
EXPECT_EQ(OH_AI_TensorGetDataType(tensor), OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
constexpr size_t new_shape_num = 4;
int64_t new_shape[new_shape_num] = {1, 2, 3, 1};
OH_AI_TensorSetShape(tensor, new_shape, new_shape_num);
size_t new_ret_shape_num;
const int64_t *new_ret_shape = OH_AI_TensorGetShape(tensor, &new_ret_shape_num);
EXPECT_EQ(new_ret_shape_num, new_shape_num);
for (size_t i = 0; i < new_ret_shape_num; i++) {
EXPECT_EQ(new_ret_shape[i], new_shape[i]);
}
OH_AI_TensorSetFormat(tensor, OH_AI_FORMAT_NCHW);
EXPECT_EQ(OH_AI_TensorGetFormat(tensor), OH_AI_FORMAT_NCHW);
constexpr size_t data_len = 6;
EXPECT_EQ(OH_AI_TensorGetElementNum(tensor), data_len);
EXPECT_EQ(OH_AI_TensorGetDataSize(tensor), data_len * sizeof(float));
float data[data_len] = {1, 2, 3, 4, 5, 6};
OH_AI_TensorSetData(tensor, data);
const float *ret_data = static_cast<const float *>(OH_AI_TensorGetData(tensor));
for (size_t i = 0; i < data_len; i++) {
EXPECT_EQ(ret_data[i], data[i]);
}
OH_AI_TensorHandle clone = OH_AI_TensorClone(tensor);
EXPECT_TRUE(clone != nullptr);
EXPECT_STREQ(OH_AI_TensorGetName(clone), "name002_duplicate");
EXPECT_EQ(OH_AI_TensorGetDataType(clone), OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
size_t clone_shape_num;
const int64_t *clone_shape = OH_AI_TensorGetShape(clone, &clone_shape_num);
EXPECT_EQ(clone_shape_num, new_ret_shape_num);
for (size_t i = 0; i < clone_shape_num; i++) {
EXPECT_EQ(clone_shape[i], new_ret_shape[i]);
}
EXPECT_EQ(OH_AI_TensorGetElementNum(clone), OH_AI_TensorGetElementNum(tensor));
EXPECT_EQ(OH_AI_TensorGetDataSize(clone), OH_AI_TensorGetDataSize(tensor));
EXPECT_TRUE(OH_AI_TensorGetData(clone) != OH_AI_TensorGetData(tensor));
OH_AI_TensorDestroy(&tensor);
OH_AI_TensorDestroy(&clone);
}
......@@ -97,7 +97,6 @@ lite_component("acts_component") {
"//test/xts/acts/appexecfwk_lite/appexecfwk_posix:ActsBundleMgrTest",
"//test/xts/acts/ability_lite/ability_posix:ActsAbilityMgrTest",
"//test/xts/acts/ai_lite/ai_engine_posix/base:ActsAiEngineTest",
"//test/xts/acts/ai/mindspore:ActsMindSporeTest",
]
}
}
......