提交 2620890a 编写于 作者: T tangshihua

add mutex lock

Signed-off-by: tangshihua <tangshihua@huawei.com>
上级 50ffb56b
...@@ -147,6 +147,7 @@ int32_t MockIDevice::IsModelCacheSupported(bool& isSupported) ...@@ -147,6 +147,7 @@ int32_t MockIDevice::IsModelCacheSupported(bool& isSupported)
int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer) int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
{ {
std::lock_guard<std::mutex> lock(m_mtx);
sptr<Ashmem> ashptr = Ashmem::CreateAshmem("allocateBuffer", length); sptr<Ashmem> ashptr = Ashmem::CreateAshmem("allocateBuffer", length);
if (ashptr == nullptr) { if (ashptr == nullptr) {
LOGE("[NNRtTest] Create shared memory failed."); LOGE("[NNRtTest] Create shared memory failed.");
...@@ -170,6 +171,7 @@ int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer) ...@@ -170,6 +171,7 @@ int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer) int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
{ {
std::lock_guard<std::mutex> lock(m_mtx);
auto ash = m_ashmems[buffer.fd]; auto ash = m_ashmems[buffer.fd];
ash->UnmapAshmem(); ash->UnmapAshmem();
return HDF_SUCCESS; return HDF_SUCCESS;
...@@ -177,6 +179,7 @@ int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer) ...@@ -177,6 +179,7 @@ int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
int32_t MockIDevice::MemoryCopy(float* data, uint32_t length) int32_t MockIDevice::MemoryCopy(float* data, uint32_t length)
{ {
std::lock_guard<std::mutex> lock(m_mtx);
auto memManager = NeuralNetworkRuntime::MemoryManager::GetInstance(); auto memManager = NeuralNetworkRuntime::MemoryManager::GetInstance();
auto memAddress = memManager->MapMemory(m_bufferFd, length); auto memAddress = memManager->MapMemory(m_bufferFd, length);
if (memAddress == nullptr) { if (memAddress == nullptr) {
......
...@@ -97,7 +97,7 @@ private: ...@@ -97,7 +97,7 @@ private:
bool m_priority = true; bool m_priority = true;
bool m_cache = true; bool m_cache = true;
bool m_dynamic = true; bool m_dynamic = true;
std::vector<bool> m_operations{true}; std::mutex m_mtx;
}; };
class MockIPreparedModel : public IPreparedModel { class MockIPreparedModel : public IPreparedModel {
......
...@@ -222,7 +222,9 @@ int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, ...@@ -222,7 +222,9 @@ int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
LOGE("[NNRtTest] OH_NNExecutor_SetOutput failed! ret=%d\n", ret); LOGE("[NNRtTest] OH_NNExecutor_SetOutput failed! ret=%d\n", ret);
return ret; return ret;
} }
ret = device->MemoryCopy(expect, operandTem.length); if(expect!=nullptr){
ret = device->MemoryCopy(expect, operandTem.length);
}
if (ret != OH_NN_SUCCESS) { if (ret != OH_NN_SUCCESS) {
LOGE("[NNRtTest] device set expect output failed! ret=%d\n", ret); LOGE("[NNRtTest] device set expect output failed! ret=%d\n", ret);
return ret; return ret;
......
...@@ -48,8 +48,7 @@ void CompileModel(OH_NNCompilation *compilation, const OHNNCompileParam &compile ...@@ -48,8 +48,7 @@ void CompileModel(OH_NNCompilation *compilation, const OHNNCompileParam &compile
void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs) void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs)
{ {
float addExpectValue[4] = {0, 1, 2, 3}; ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, nullptr));
ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addExpectValue));
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册