From 79e2f061e405a6aba1f75a28f3d59781d695e688 Mon Sep 17 00:00:00 2001
From: tangshihua
Date: Mon, 14 Nov 2022 19:51:28 +0800
Subject: [PATCH] Static code check
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: tangshihua
---
 .../common/mock_idevice.cpp                   | 18 +++++-----
 .../common/mock_idevice.h                     |  6 ++--
 .../common/nnrt_utils.cpp                     | 36 +++++++++----------
 ai/neural_network_runtime/common/nnrt_utils.h |  5 +--
 .../interface/src/CompileTest.cpp             | 14 ++++----
 .../interface/src/ExecutorTest.cpp            | 10 +-----
 .../interface/src/MemoryTest.cpp              | 12 ++-----
 7 files changed, 43 insertions(+), 58 deletions(-)

diff --git a/ai/neural_network_runtime/common/mock_idevice.cpp b/ai/neural_network_runtime/common/mock_idevice.cpp
index 03d43392f..e5ffcd267 100644
--- a/ai/neural_network_runtime/common/mock_idevice.cpp
+++ b/ai/neural_network_runtime/common/mock_idevice.cpp
@@ -109,31 +109,31 @@ int32_t MockIDevice::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion)
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::GetSupportedOperation(const Model& model, std::vector<bool>& ops) 
+int32_t MockIDevice::GetSupportedOperation(const Model& model, std::vector<bool>& ops)
 {
     ops = m_operations;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsFloat16PrecisionSupported(bool& isSupported) 
+int32_t MockIDevice::IsFloat16PrecisionSupported(bool& isSupported)
 {
     isSupported = m_fp16;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsPerformanceModeSupported(bool& isSupported) 
+int32_t MockIDevice::IsPerformanceModeSupported(bool& isSupported)
 {
     isSupported = m_performance;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsPrioritySupported(bool& isSupported) 
+int32_t MockIDevice::IsPrioritySupported(bool& isSupported)
 {
     isSupported = m_priority;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsDynamicInputSupported(bool& isSupported) 
+int32_t MockIDevice::IsDynamicInputSupported(bool& isSupported)
 {
     isSupported = m_dynamic;
     return HDF_SUCCESS;
@@ -164,7 +164,7 @@ int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
     buffer.dataSize = length;
 
     m_ashmems[buffer.fd] = ashptr;
-    m_buffer_fd = buffer.fd;
+    m_bufferFd = buffer.fd;
     return HDF_SUCCESS;
 }
@@ -175,10 +175,10 @@ int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::MemoryCopy(void *data, uint32_t length)
+int32_t MockIDevice::MemoryCopy(float *data, uint32_t length)
 {
     auto memManager = NeuralNetworkRuntime::MemoryManager::GetInstance();
-    auto memAddress = memManager->MapMemory(m_buffer_fd, length);
+    auto memAddress = memManager->MapMemory(m_bufferFd, length);
     if (memAddress == nullptr) {
         LOGE("[NNRtTest] Map fd to address failed.");
         return HDF_FAILURE;
@@ -198,7 +198,7 @@ int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config,
 }
 
 int32_t MockIDevice::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
-                                                sptr<IPreparedModel>& preparedModel)
+    sptr<IPreparedModel>& preparedModel)
 {
     preparedModel = new (std::nothrow) V1_0::MockIPreparedModel();
     return HDF_SUCCESS;
diff --git a/ai/neural_network_runtime/common/mock_idevice.h b/ai/neural_network_runtime/common/mock_idevice.h
index 526ed5240..ca802c66a 100644
--- a/ai/neural_network_runtime/common/mock_idevice.h
+++ b/ai/neural_network_runtime/common/mock_idevice.h
@@ -70,7 +70,7 @@ public:
     int32_t PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
         sptr<IPreparedModel>& preparedModel) override;
 
-    int32_t MemoryCopy(void *data, uint32_t length);
+    int32_t MemoryCopy(float *data, uint32_t length);
 
     void SetFP16Supported(bool isSupported);
 
@@ -91,7 +91,7 @@ public:
 private:
     std::unordered_map<int, sptr<Ashmem>> m_ashmems;
-    int m_buffer_fd;
+    int m_bufferFd;
     bool m_fp16 = true;
     bool m_performance = true;
     bool m_priority = true;
@@ -106,7 +106,7 @@ public:
     int32_t Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
         std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough) override;
     int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override;
-    MockIPreparedModel() = default; 
+    MockIPreparedModel() = default;
 };
 
 } // namespace V1_0
diff --git a/ai/neural_network_runtime/common/nnrt_utils.cpp b/ai/neural_network_runtime/common/nnrt_utils.cpp
index 76b4d444f..3d2ff06a3 100644
--- a/ai/neural_network_runtime/common/nnrt_utils.cpp
+++ b/ai/neural_network_runtime/common/nnrt_utils.cpp
@@ -35,8 +35,7 @@ int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs)
         const OHNNOperandTest &operandTem = graphArgs.operands[j][i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-                                operandTem.shape.data(),
-                                quantParam, operandTem.type};
+                                operandTem.shape.data(), quantParam, operandTem.type};
         ret = OH_NNModel_AddTensor(model, &operand);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret);
@@ -57,7 +56,7 @@
         auto outputIndices = TransformUInt32Array(graphArgs.outputIndices[j]);
         ret = OH_NNModel_AddOperation(model, graphArgs.operationTypes[j], &paramIndices, &inputIndices,
-                                      &outputIndices);
+            &outputIndices);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret);
             return ret;
@@ -85,8 +84,7 @@ int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs)
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-                                operandTem.shape.data(),
-                                quantParam, operandTem.type};
+                                operandTem.shape.data(), quantParam, operandTem.type};
         ret = OH_NNModel_AddTensor(model, &operand);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret);
@@ -159,7 +157,7 @@ int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &comp
     // set cache
     if (!compileParam.cacheDir.empty()) {
         ret = OH_NNCompilation_SetCache(compilation, compileParam.cacheDir.c_str(),
-                                        compileParam.cacheVersion);
+            compileParam.cacheVersion);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNCompilation_SetCache failed! ret=%d\n", ret);
             return ret;
@@ -196,7 +194,7 @@ int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &comp
 
 int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
-                     void *expect)
+    float* expect)
 {
     OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
     int ret = 0;
@@ -206,12 +204,12 @@
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-                                operandTem.shape.data(),
-                                quantParam, operandTem.type};
+            operandTem.shape.data(),
+            quantParam, operandTem.type};
         if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) !=
             graphArgs.inputIndices.end()) {
             ret = OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data,
-                                         operandTem.length);
+                operandTem.length);
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] OH_NNExecutor_SetInput failed! ret=%d\n", ret);
                 return ret;
@@ -236,7 +234,7 @@ int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
     return ret;
 }
 
-int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], void *expect)
+int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], float *expect)
 {
     OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
     int ret = 0;
@@ -246,12 +244,12 @@
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-                                operandTem.shape.data(),
-                                quantParam, operandTem.type};
+            operandTem.shape.data(),
+            quantParam, operandTem.type};
         if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) !=
             graphArgs.inputIndices.end()) {
             OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex,
-                                                                          operandTem.length);
+                operandTem.length);
             ret = OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory);
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] OH_NNExecutor_SetInputWithMemory failed! ret=%d\n", ret);
@@ -263,7 +261,7 @@ int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
         } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) !=
             graphArgs.outputIndices.end()) {
             OH_NN_Memory *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex,
-                                                                            operandTem.length);
+                operandTem.length);
             ret = OH_NNExecutor_SetOutputWithMemory(executor, outputIndex, outputMemory);
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] OH_NNExecutor_SetOutputWithMemory failed! ret=%d\n", ret);
@@ -273,12 +271,11 @@
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] device set expect output failed! ret=%d\n", ret);
                 return ret;
-                }
+            }
             OHNNMemory[inputIndex + outputIndex] = outputMemory;
             outputIndex += 1;
         }
     }
-
     ret = OH_NNExecutor_Run(executor);
     return ret;
 }
@@ -407,6 +404,7 @@ bool CreateFolder(const std::string &path)
         return false;
     }
     LOGI("CreateFolder:%s", path.c_str());
+    mode_t mode = 0700;
     for (int i = 1; i < path.size() - 1; i++) {
         if (path[i] != '/') {
             continue;
@@ -417,14 +415,14 @@
                continue;
            case PathType::NOT_FOUND:
                LOGI("mkdir: %s", path.substr(0, i).c_str());
-                mkdir(path.substr(0, i).c_str(), 0700);
+                mkdir(path.substr(0, i).c_str(), mode);
                break;
            default:
                LOGI("error: %s", path.substr(0, i).c_str());
                return false;
         }
     }
-    mkdir(path.c_str(), 0700);
+    mkdir(path.c_str(), mode);
     return CheckPath(path) == PathType::DIR;
 }
diff --git a/ai/neural_network_runtime/common/nnrt_utils.h b/ai/neural_network_runtime/common/nnrt_utils.h
index 088886a97..c00d95f87 100644
--- a/ai/neural_network_runtime/common/nnrt_utils.h
+++ b/ai/neural_network_runtime/common/nnrt_utils.h
@@ -69,13 +69,14 @@ struct OHNNCompileParam {
 
 int BuildSingleOpGraph(OH_NNModel *modelptr, const OHNNGraphArgs &args);
 
-int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], void* expect);
+int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[],
+    float* expect);
 
 void Free(OH_NNModel *model = nullptr, OH_NNCompilation *compilation = nullptr, OH_NNExecutor *executor = nullptr);
 
 int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam);
 
-int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, void* expect);
+int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float * expect);
 
 int SetDevice(OH_NNCompilation *compilation);
 
 int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs);
diff --git a/ai/neural_network_runtime/interface/src/CompileTest.cpp b/ai/neural_network_runtime/interface/src/CompileTest.cpp
index dd7db7aa1..2ef9177e2 100644
--- a/ai/neural_network_runtime/interface/src/CompileTest.cpp
+++ b/ai/neural_network_runtime/interface/src/CompileTest.cpp
@@ -38,7 +38,8 @@ public:
     {
         DeleteFolder(CACHE_DIR);
     }
-    void GenCacheFile() {
+    void GenCacheFile()
+    {
         OH_NNModel *model = OH_NNModel_Construct();
         ASSERT_NE(nullptr, model);
         ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
@@ -53,15 +54,16 @@ public:
         ASSERT_TRUE(CheckPath(CACHE_PATH) == PathType::FILE);
         ASSERT_TRUE(CheckPath(CACHE_INFO_PATH) == PathType::FILE);
     }
-    void DestroyCache() {
+    void DestroyCache()
+    {
         std::ifstream ifs(CACHE_PATH.c_str(), std::ios::in | std::ios::binary);
         char* ptr{nullptr};
-        int cache_size = ifs.tellg();
-        int invalid_cache_size = cache_size * 0.9;
-        ifs.read(ptr, cache_size);
+        int cacheSize = ifs.tellg();
+        int invalidCacheSize = cacheSize * 0.9;
+        ifs.read(ptr, cacheSize);
         ifs.close();
         std::ofstream ofs(CACHE_PATH.c_str(), std::ios::out | std::ios::binary);
-        ofs.write(ptr, invalid_cache_size);
+        ofs.write(ptr, invalidCacheSize);
         ofs.close();
     }
diff --git a/ai/neural_network_runtime/interface/src/ExecutorTest.cpp b/ai/neural_network_runtime/interface/src/ExecutorTest.cpp
index a5abf525b..175570d91 100644
--- a/ai/neural_network_runtime/interface/src/ExecutorTest.cpp
+++ b/ai/neural_network_runtime/interface/src/ExecutorTest.cpp
@@ -28,14 +28,6 @@ using namespace OHOS::HDI::Nnrt::V1_0;
 namespace {
 
 class ExecutorTest : public testing::Test {
-public:
-    void SetUp()
-    {
-    }
-    void TearDown()
-    {
-    }
-
 protected:
     OHOS::sptr<V1_0::MockIDevice> device;
     AddModel addModel;
@@ -43,7 +35,7 @@ protected:
     OHNNCompileParam compileParam;
 };
 
-void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, void* expect)
+void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float* expect)
 {
     ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, expect));
 }
diff --git a/ai/neural_network_runtime/interface/src/MemoryTest.cpp b/ai/neural_network_runtime/interface/src/MemoryTest.cpp
index 5688c123a..eb54fbf0c 100644
--- a/ai/neural_network_runtime/interface/src/MemoryTest.cpp
+++ b/ai/neural_network_runtime/interface/src/MemoryTest.cpp
@@ -29,14 +29,6 @@ using namespace OHOS::HDI::Nnrt::V1_0;
 namespace {
 
 class MemoryTest : public testing::Test {
-public:
-    void SetUp()
-    {
-    }
-    void TearDown()
-    {
-    }
-
 protected:
     AddModel addModel;
     OHNNGraphArgs graphArgs = addModel.graphArgs;
@@ -854,7 +846,7 @@ HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0300, Function |
     for (auto j = 0; j < graphArgs.outputIndices.size(); j++) {
         auto outputIndex = graphArgs.inputIndices.size() + j;
         // check memory output
-        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)), 
+        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)),
                                 (float*) addModel.expectValue));
         OH_NNExecutor_DestroyOutputMemory(executor, j, &OHNNMemory[outputIndex]);
         ASSERT_EQ(OHNNMemory[outputIndex], nullptr);
@@ -898,7 +890,7 @@ HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0400, Function |
     for (auto j = 0; j < graphArgs.outputIndices.size(); j++) {
         auto outputIndex = graphArgs.inputIndices.size() + j;
         // check memory output
-        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)), 
+        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)),
                                 (float*) avgModel.expectValue));
        OH_NNExecutor_DestroyOutputMemory(executor, j, &OHNNMemory[outputIndex]);
        ASSERT_EQ(OHNNMemory[outputIndex], nullptr);
-- 
GitLab
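
For reference, the substantive interface change in this patch is that MemoryCopy, ExecuteGraphMock, ExecutorWithMemory, and ExecuteModel now take the expected-output buffer as float* rather than void*. Below is a minimal caller sketch against the new signatures. It is illustrative only and not part of the patch: it assumes the AddModel fixture and the helpers declared in nnrt_utils.h, the wrapper name RunAddGraphExample is hypothetical, and the (float *) cast on expectValue mirrors the casts used in the tests above, since the fixture's storage type is not shown in this diff.

    // Hypothetical usage sketch for the new float* expected-output parameter.
    #include "nnrt_utils.h"

    void RunAddGraphExample(OH_NNExecutor *executor)
    {
        AddModel addModel;                              // single Add-op test fixture
        OHNNGraphArgs graphArgs = addModel.graphArgs;

        // The expected result is passed as float* directly; callers no longer
        // funnel it through a void* parameter.
        float *expect = (float *) addModel.expectValue;

        int ret = ExecuteGraphMock(executor, graphArgs, expect);
        if (ret != OH_NN_SUCCESS) {
            // OH_NN_SUCCESS is expected when the mock device runs the graph
            // and stores the expected output; handle failure here.
        }
    }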