diff --git a/ai/neural_network_runtime/v2_0/common/mock_idevice.cpp b/ai/neural_network_runtime/v2_0/common/mock_idevice.cpp
index 457d9abde30cf882d724e4a48e5c6dd99458c994..b33576ca3cbf26e239af8b85c02b381faef7829b 100644
--- a/ai/neural_network_runtime/v2_0/common/mock_idevice.cpp
+++ b/ai/neural_network_runtime/v2_0/common/mock_idevice.cpp
@@ -206,6 +206,13 @@ int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config,
     return HDF_SUCCESS;
 }
 
+int32_t MockIDevice::PrepareOfflineModel(const std::vector<SharedBuffer>& offlineModels, const ModelConfig& config,
+    sptr<V2_0::IPreparedModel>& preparedModel)
+{
+    preparedModel = new (std::nothrow) V2_0::MockIPreparedModel();
+    return V2_0::NNRT_ReturnCode::NNRT_SUCCESS;
+}
+
 int32_t MockIDevice::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
     sptr<V2_0::IPreparedModel>& preparedModel)
 {
@@ -251,10 +258,9 @@ int32_t MockIPreparedModel::GetVersion(uint32_t &majorVersion, uint32_t &minorVe
     return HDF_SUCCESS;
 }
 
 int32_t MockIPreparedModel::Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
-    std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough)
+    std::vector<std::vector<int32_t>>& outputsDims)
 {
     outputsDims = {{1, 2, 2, 1}};
-    isOutputBufferEnough = {true};
     return HDF_SUCCESS;
 }
diff --git a/ai/neural_network_runtime/v2_0/common/mock_idevice.h b/ai/neural_network_runtime/v2_0/common/mock_idevice.h
index 93acc8b166f41f0667c749cb6788bf8a778678a4..83739d9b943d74a49ca6ff5a400e763129115e95 100644
--- a/ai/neural_network_runtime/v2_0/common/mock_idevice.h
+++ b/ai/neural_network_runtime/v2_0/common/mock_idevice.h
@@ -70,6 +70,9 @@ public:
     int32_t PrepareModel(const Model& model, const ModelConfig& config,
         sptr<IPreparedModel>& preparedModel) override;
 
+    int32_t PrepareOfflineModel(const std::vector<SharedBuffer>& offlineModels, const ModelConfig& config,
+        sptr<IPreparedModel>& preparedModel) override;
+
     int32_t PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
         sptr<IPreparedModel>& preparedModel) override;
 
@@ -108,7 +111,7 @@ class MockIPreparedModel : public IPreparedModel {
 public:
     int32_t ExportModelCache(std::vector<SharedBuffer>& modelCache) override;
     int32_t Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
-        std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough) override;
+        std::vector<std::vector<int32_t>>& outputsDims) override;
     int32_t GetInputDimRanges(std::vector<std::vector<uint32_t>>& minInputDims, std::vector<std::vector<uint32_t>>& maxInputDims) override;
     int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override;
     MockIPreparedModel() = default;
diff --git a/ai/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp b/ai/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp
index 1fb82b66f7df53837959aaa361a6226384bea5ca..ae19874c531de51c565b1eceef9607c589056143 100644
--- a/ai/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp
+++ b/ai/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp
@@ -805,7 +805,7 @@ HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0200, Function |
     OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation);
     ASSERT_NE(nullptr, executor);
     uint32_t inputIndex = 0;
-    for (auto i = 0; i < graphArgs.operands.size(); i++) {
+    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(),