Unverified commit ecd5252b, authored by openharmony_ci, committed by Gitee

!7090 AI subsystem: improve the multi-thread stability test cases

Merge pull request !7090 from 汤石华/master
@@ -147,6 +147,7 @@ int32_t MockIDevice::IsModelCacheSupported(bool& isSupported)
 int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
 {
+    std::lock_guard<std::mutex> lock(m_mtx);
     sptr<Ashmem> ashptr = Ashmem::CreateAshmem("allocateBuffer", length);
     if (ashptr == nullptr) {
         LOGE("[NNRtTest] Create shared memory failed.");
@@ -170,6 +171,7 @@ int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
 int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
 {
+    std::lock_guard<std::mutex> lock(m_mtx);
     auto ash = m_ashmems[buffer.fd];
     ash->UnmapAshmem();
     return HDF_SUCCESS;
@@ -177,6 +179,7 @@ int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
 int32_t MockIDevice::MemoryCopy(float* data, uint32_t length)
 {
+    std::lock_guard<std::mutex> lock(m_mtx);
     auto memManager = NeuralNetworkRuntime::MemoryManager::GetInstance();
     auto memAddress = memManager->MapMemory(m_bufferFd, length);
     if (memAddress == nullptr) {
...
@@ -98,6 +98,7 @@ private:
     bool m_cache = true;
     bool m_dynamic = true;
     std::vector<bool> m_operations{true};
+    std::mutex m_mtx;
 };
 class MockIPreparedModel : public IPreparedModel {
...
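The hunks above all apply one pattern: a std::mutex member (m_mtx) is added to MockIDevice, and each method that touches the shared ashmem state acquires a std::lock_guard on entry, so the multi-thread stability cases cannot interleave inside AllocateBuffer, ReleaseBuffer, or MemoryCopy. Below is a minimal self-contained sketch of that pattern; FakeDevice, m_buffers, and m_nextFd are hypothetical stand-ins, not code from this repository.

#include <cstdint>
#include <map>
#include <mutex>
#include <thread>
#include <vector>

// Hypothetical stand-in for MockIDevice: every method that touches the shared
// buffer map locks the same mutex, so concurrent test threads are serialized.
class FakeDevice {
public:
    int32_t AllocateBuffer(uint32_t length, int& fdOut)
    {
        std::lock_guard<std::mutex> lock(m_mtx); // held until the function returns
        int fd = m_nextFd++;
        m_buffers[fd] = std::vector<uint8_t>(length);
        fdOut = fd;
        return 0;
    }

    int32_t ReleaseBuffer(int fd)
    {
        std::lock_guard<std::mutex> lock(m_mtx); // same mutex as AllocateBuffer
        m_buffers.erase(fd);
        return 0;
    }

private:
    std::mutex m_mtx;                            // guards m_buffers and m_nextFd
    std::map<int, std::vector<uint8_t>> m_buffers;
    int m_nextFd = 3;
};

int main()
{
    FakeDevice device;
    auto worker = [&device]() {
        int fd = -1;
        device.AllocateBuffer(16, fd);           // allocate and release without racing
        device.ReleaseBuffer(fd);
    };
    std::thread t1(worker);
    std::thread t2(worker);
    t1.join();
    t2.join();
    return 0;
}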
@@ -222,7 +222,9 @@ int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
             LOGE("[NNRtTest] OH_NNExecutor_SetOutput failed! ret=%d\n", ret);
             return ret;
         }
-        ret = device->MemoryCopy(expect, operandTem.length);
+        if (expect != nullptr) {
+            ret = device->MemoryCopy(expect, operandTem.length);
+        }
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] device set expect output failed! ret=%d\n", ret);
             return ret;
...
@@ -48,8 +48,7 @@ void CompileModel(OH_NNCompilation *compilation, const OHNNCompileParam &compile
 void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs)
 {
-    float addExpectValue[4] = {0, 1, 2, 3};
-    ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addExpectValue));
+    ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, nullptr));
 }
...
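The last two hunks belong together: ExecuteModel now passes nullptr instead of a local expected-value array, and ExecuteGraphMock only calls MemoryCopy when an expect buffer was actually provided. Here is a small self-contained sketch of that null-guard pattern; the names MockMemoryCopy and SetExpectedOutput are illustrative, not taken from the repository.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical stand-in for MockIDevice::MemoryCopy: just fills the caller's buffer.
int32_t MockMemoryCopy(float* data, uint32_t length)
{
    std::memset(data, 0, length);
    return 0;
}

// Mirrors the guarded call in ExecuteGraphMock: when no expected-output buffer is
// supplied, the copy step is skipped and the previous (successful) code is kept.
int32_t SetExpectedOutput(float* expect, uint32_t length)
{
    int32_t ret = 0;
    if (expect != nullptr) {
        ret = MockMemoryCopy(expect, length);
    }
    return ret; // a nullptr expect is not treated as an error
}

int main()
{
    float buffer[4] = {0.0f, 1.0f, 2.0f, 3.0f};
    std::printf("with buffer:  %d\n", SetExpectedOutput(buffer, sizeof(buffer)));
    std::printf("with nullptr: %d\n", SetExpectedOutput(nullptr, 0));
    return 0;
}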