Commit 57800320 authored by tangshihua

Static check fixes

Signed-off-by: tangshihua <tangshihua@huawei.com>
Parent 79e2f061
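The hunks below apply two mechanical cleanups reported by the static checker: writing pointer parameters as `float* data` rather than `float *data`, and splitting or re-aligning lines that exceed the length limit. A minimal before/after illustration using one declaration actually touched below (the column limit itself is the checker's configuration and is not stated in this commit):

    // Before: over-long single-line declaration with `float *expect`
    int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], float *expect);

    // After: trailing parameter wrapped to a continuation line, `float* expect`
    int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[],
        float* expect);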
@@ -175,7 +175,7 @@ int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
     return HDF_SUCCESS;
 }
-int32_t MockIDevice::MemoryCopy(float *data, uint32_t length)
+int32_t MockIDevice::MemoryCopy(float* data, uint32_t length)
 {
     auto memManager = NeuralNetworkRuntime::MemoryManager::GetInstance();
     auto memAddress = memManager->MapMemory(m_bufferFd, length);
@@ -198,7 +198,7 @@ int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config,
 }
 int32_t MockIDevice::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
-    sptr<IPreparedModel>& preparedModel)
+        sptr<IPreparedModel>& preparedModel)
 {
     preparedModel = new (std::nothrow) V1_0::MockIPreparedModel();
     return HDF_SUCCESS;
......
@@ -70,7 +70,7 @@ public:
     int32_t PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
         sptr<IPreparedModel>& preparedModel) override;
-    int32_t MemoryCopy(float *data, uint32_t length);
+    int32_t MemoryCopy(float* data, uint32_t length);
     void SetFP16Supported(bool isSupported);
......
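For orientation, `MockIDevice::MemoryCopy` maps the shared buffer and copies the expected output into it; only the two lines up to `MapMemory` survive in the truncated hunk above, so the copy and the error handling in this sketch are assumptions, not the committed code:

    // Hedged sketch: plausible completion of MockIDevice::MemoryCopy.
    // Only GetInstance() and MapMemory() are visible in the hunk; memcpy_s
    // (securec) and the failure paths are assumed for illustration.
    int32_t MockIDevice::MemoryCopy(float* data, uint32_t length)
    {
        auto memManager = NeuralNetworkRuntime::MemoryManager::GetInstance();
        auto memAddress = memManager->MapMemory(m_bufferFd, length);
        if (memAddress == nullptr) {
            return HDF_FAILURE;   // mapping the shared buffer failed
        }
        // Copy the caller-supplied expected values into the shared buffer.
        auto ret = memcpy_s(memAddress, length, data, length);
        return ret == EOK ? HDF_SUCCESS : HDF_FAILURE;
    }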
@@ -35,7 +35,7 @@ int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs)
         const OHNNOperandTest &operandTem = graphArgs.operands[j][i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-            operandTem.shape.data(), quantParam, operandTem.type};
+                                operandTem.shape.data(), quantParam, operandTem.type};
         ret = OH_NNModel_AddTensor(model, &operand);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret);
@@ -84,7 +84,7 @@ int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs)
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-            operandTem.shape.data(), quantParam, operandTem.type};
+                                operandTem.shape.data(), quantParam, operandTem.type};
         ret = OH_NNModel_AddTensor(model, &operand);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret);
@@ -194,7 +194,7 @@ int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &comp
 int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
-    float* expect)
+                     float* expect)
 {
     OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
     int ret = 0;
@@ -204,8 +204,8 @@ float* expect)
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-            operandTem.shape.data(),
-            quantParam, operandTem.type};
+                                operandTem.shape.data(),
+                                quantParam, operandTem.type};
         if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) !=
             graphArgs.inputIndices.end()) {
             ret = OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data,
@@ -234,7 +234,8 @@ float* expect)
     return ret;
 }
-int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], float *expect)
+int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[],
+    float* expect)
 {
     OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
     int ret = 0;
@@ -244,8 +245,8 @@ int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-            operandTem.shape.data(),
-            quantParam, operandTem.type};
+                                operandTem.shape.data(),
+                                quantParam, operandTem.type};
         if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) !=
             graphArgs.inputIndices.end()) {
             OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex,
@@ -261,7 +262,7 @@ int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
         } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) !=
             graphArgs.outputIndices.end()) {
             OH_NN_Memory *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex,
-                operandTem.length);
+                                                                            operandTem.length);
             ret = OH_NNExecutor_SetOutputWithMemory(executor, outputIndex, outputMemory);
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] OH_NNExecutor_SetOutputWithMemory failed! ret=%d\n", ret);
......
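Several hunks above re-indent the same aggregate initializer; for clarity, this is the pattern they format: an `OH_NN_Tensor` is filled field-by-field from a test operand and registered on the model. A condensed sketch of that pattern (the `OHNNOperandTest` layout and `LOGE` macro are this suite's own, reused here, and the helper name `AddOperand` is hypothetical):

    // Sketch of the tensor-registration pattern from BuildSingleOpGraph.
    // Field order mirrors the initializer in the diff: dataType,
    // dimensionCount, dimensions, quantParam, type.
    static int AddOperand(OH_NNModel *model, const OHNNOperandTest &operandTem)
    {
        OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
                                operandTem.shape.data(),
                                operandTem.quantParam, operandTem.type};
        int ret = OH_NNModel_AddTensor(model, &operand);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret);
        }
        return ret;
    }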
@@ -70,13 +70,13 @@ struct OHNNCompileParam {
 int BuildSingleOpGraph(OH_NNModel *modelptr, const OHNNGraphArgs &args);
 int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[],
-    float* expect);
+                       float* expect);
 void Free(OH_NNModel *model = nullptr, OH_NNCompilation *compilation = nullptr, OH_NNExecutor *executor = nullptr);
 int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam);
-int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float * expect);
+int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float* expect);
 int SetDevice(OH_NNCompilation *compilation);
 int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs);
......
@@ -1204,7 +1204,7 @@ HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Combine_0500, Function |
     }
     ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(executor));
     // check result
-    EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(outputMemory->data)),
+    EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(outputMemory->data)),
         (float*) addModel.expectValue));
     OH_NNExecutor_DestroyOutputMemory(executor, 0, &outputMemory);
......
@@ -854,8 +854,8 @@ HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0400, Fun
     uint32_t devicesCount{0};
     ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
     size_t targetDevice = devicesID[0];
-    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_GetAvailableOperations(model, targetDevice, &realSupported, &opCount));
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_GetAvailableOperations(model, targetDevice,
+        &realSupported, &opCount));
     Free(model);
 }
@@ -875,8 +875,8 @@ HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0500, Fun
     uint32_t devicesCount{0};
     ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
     size_t targetDevice = devicesID[0];
-    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_GetAvailableOperations(model, targetDevice, &isSupported, nullptr));
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_GetAvailableOperations(model, targetDevice,
+        &isSupported, nullptr));
     Free(model);
 }
@@ -898,8 +898,8 @@ HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0600, Fun
     uint32_t devicesCount{0};
     ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
     size_t targetDevice = devicesID[0];
-    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNModel_GetAvailableOperations(model, targetDevice, &isSupported, &opCount));
+    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNModel_GetAvailableOperations(model, targetDevice,
+        &isSupported, &opCount));
     Free(model);
 }
......
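Taken together, the three re-wrapped assertions document the error contract of `OH_NNModel_GetAvailableOperations`: a null out-parameter is rejected with `OH_NN_INVALID_PARAMETER`, while querying a model in the wrong state returns `OH_NN_OPERATION_FORBIDDEN`. A condensed sketch of the happy-path call as the tests use it (the exact model state behind each error code lives in the elided test bodies, so the comments are inferences):

    // Query which operations a device supports for a constructed model.
    const size_t *devicesID{nullptr};
    uint32_t devicesCount{0};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
    size_t targetDevice = devicesID[0];

    const bool *isSupported{nullptr};
    uint32_t opCount{0};
    // Passing nullptr for either out-parameter yields OH_NN_INVALID_PARAMETER;
    // calling before the model is finished yields OH_NN_OPERATION_FORBIDDEN
    // (inferred from the asserted codes above).
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_GetAvailableOperations(model, targetDevice,
        &isSupported, &opCount));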