diff --git a/ai/neural_network_runtime/common/mock_idevice.cpp b/ai/neural_network_runtime/common/mock_idevice.cpp index 650c77aaf3c8342b2b193fc1921b83d8d71ba014..04ea181edac6b7e599fced8504b4bc2866c34289 100644 --- a/ai/neural_network_runtime/common/mock_idevice.cpp +++ b/ai/neural_network_runtime/common/mock_idevice.cpp @@ -147,6 +147,7 @@ int32_t MockIDevice::IsModelCacheSupported(bool& isSupported) int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer) { + std::lock_guard<std::mutex> lock(m_mtx); sptr<Ashmem> ashptr = Ashmem::CreateAshmem("allocateBuffer", length); if (ashptr == nullptr) { LOGE("[NNRtTest] Create shared memory failed."); @@ -170,6 +171,7 @@ int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer) int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer) { + std::lock_guard<std::mutex> lock(m_mtx); auto ash = m_ashmems[buffer.fd]; ash->UnmapAshmem(); return HDF_SUCCESS; @@ -177,6 +179,7 @@ int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer) int32_t MockIDevice::MemoryCopy(float* data, uint32_t length) { + std::lock_guard<std::mutex> lock(m_mtx); auto memManager = NeuralNetworkRuntime::MemoryManager::GetInstance(); auto memAddress = memManager->MapMemory(m_bufferFd, length); if (memAddress == nullptr) { diff --git a/ai/neural_network_runtime/common/mock_idevice.h b/ai/neural_network_runtime/common/mock_idevice.h index 3c2d8e4d1186b1c7691df813ce6d824e4512046f..4f5887cf2209a2d2ee64a5b2e325acd652fc64bf 100644 --- a/ai/neural_network_runtime/common/mock_idevice.h +++ b/ai/neural_network_runtime/common/mock_idevice.h @@ -97,7 +97,7 @@ private: bool m_priority = true; bool m_cache = true; bool m_dynamic = true; - std::vector<bool> m_operations{true}; + std::mutex m_mtx; }; class MockIPreparedModel : public IPreparedModel { diff --git a/ai/neural_network_runtime/common/nnrt_utils.cpp b/ai/neural_network_runtime/common/nnrt_utils.cpp index 8692dffc3c73f8f24f82fc263cbb73ba60fc8e7c..01f10ffed2c4fb21191de68f6809e517d1c072ba 100644 --- 
a/ai/neural_network_runtime/common/nnrt_utils.cpp +++ b/ai/neural_network_runtime/common/nnrt_utils.cpp @@ -222,7 +222,9 @@ int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, LOGE("[NNRtTest] OH_NNExecutor_SetOutput failed! ret=%d\n", ret); return ret; } - ret = device->MemoryCopy(expect, operandTem.length); + if (expect != nullptr) { + ret = device->MemoryCopy(expect, operandTem.length); + } if (ret != OH_NN_SUCCESS) { LOGE("[NNRtTest] device set expect output failed! ret=%d\n", ret); return ret; diff --git a/ai/neural_network_runtime/stability/src/MultiThreadTest.cpp b/ai/neural_network_runtime/stability/src/MultiThreadTest.cpp index dee5ea9a9659f7597668c08166f7fc150e185bd3..a609bf446a3d87ab7c171144807688adac8555c2 100644 --- a/ai/neural_network_runtime/stability/src/MultiThreadTest.cpp +++ b/ai/neural_network_runtime/stability/src/MultiThreadTest.cpp @@ -48,8 +48,7 @@ void CompileModel(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam) void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs) { - float addExpectValue[4] = {0, 1, 2, 3}; - ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addExpectValue)); + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, nullptr)); }