Commit 79e2f061 authored by tangshihua

Static code check fixes

Signed-off-by: tangshihua <tangshihua@huawei.com>
Parent 0a4563ea
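The hunks below apply a handful of mechanical rules flagged by the static check: camelCase member names instead of snake_case, concretely typed buffer pointers instead of `void*`, function braces on their own line, and a named mode variable instead of a repeated magic number. A minimal before/after sketch of these conventions (the class name and path are illustrative, not from the patch):

```cpp
#include <cstdint>
#include <sys/stat.h>
#include <sys/types.h>

class StyleExample {
public:
    void GenCacheFile()                      // brace moved to its own line
    {
        mode_t mode = 0700;                  // magic number bound to a named variable
        mkdir("/data/test_cache", mode);     // illustrative path
    }
    int32_t MemoryCopy(float *data, uint32_t length);  // was: void *data
private:
    int m_bufferFd = -1;                     // was: m_buffer_fd
};
```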
@@ -109,31 +109,31 @@ int32_t MockIDevice::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion)
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::GetSupportedOperation(const Model& model, std::vector<bool>& ops)
+int32_t MockIDevice::GetSupportedOperation(const Model& model, std::vector<bool>& ops)
 {
     ops = m_operations;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsFloat16PrecisionSupported(bool& isSupported)
+int32_t MockIDevice::IsFloat16PrecisionSupported(bool& isSupported)
 {
     isSupported = m_fp16;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsPerformanceModeSupported(bool& isSupported)
+int32_t MockIDevice::IsPerformanceModeSupported(bool& isSupported)
 {
     isSupported = m_performance;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsPrioritySupported(bool& isSupported)
+int32_t MockIDevice::IsPrioritySupported(bool& isSupported)
 {
     isSupported = m_priority;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsDynamicInputSupported(bool& isSupported)
+int32_t MockIDevice::IsDynamicInputSupported(bool& isSupported)
 {
     isSupported = m_dynamic;
     return HDF_SUCCESS;
@@ -164,7 +164,7 @@ int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
     buffer.dataSize = length;
     m_ashmems[buffer.fd] = ashptr;
-    m_buffer_fd = buffer.fd;
+    m_bufferFd = buffer.fd;
     return HDF_SUCCESS;
 }
@@ -175,10 +175,10 @@ int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::MemoryCopy(void *data, uint32_t length)
+int32_t MockIDevice::MemoryCopy(float *data, uint32_t length)
 {
     auto memManager = NeuralNetworkRuntime::MemoryManager::GetInstance();
-    auto memAddress = memManager->MapMemory(m_buffer_fd, length);
+    auto memAddress = memManager->MapMemory(m_bufferFd, length);
     if (memAddress == nullptr) {
         LOGE("[NNRtTest] Map fd to address failed.");
         return HDF_FAILURE;
@@ -198,7 +198,7 @@ int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config,
 }
 
 int32_t MockIDevice::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
-    sptr<IPreparedModel>& preparedModel)
+    sptr<IPreparedModel>& preparedModel)
 {
     preparedModel = new (std::nothrow) V1_0::MockIPreparedModel();
     return HDF_SUCCESS;
......
@@ -70,7 +70,7 @@ public:
     int32_t PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
         sptr<IPreparedModel>& preparedModel) override;
-    int32_t MemoryCopy(void *data, uint32_t length);
+    int32_t MemoryCopy(float *data, uint32_t length);
     void SetFP16Supported(bool isSupported);
@@ -91,7 +91,7 @@ public:
 private:
     std::unordered_map<int, sptr<Ashmem>> m_ashmems;
-    int m_buffer_fd;
+    int m_bufferFd;
     bool m_fp16 = true;
     bool m_performance = true;
     bool m_priority = true;
@@ -106,7 +106,7 @@ public:
     int32_t Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
         std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough) override;
     int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override;
-    MockIPreparedModel() = default;
+    MockIPreparedModel() = default;
 };
 } // namespace V1_0
......
@@ -35,8 +35,7 @@ int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs)
         const OHNNOperandTest &operandTem = graphArgs.operands[j][i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-                                operandTem.shape.data(),
-                                quantParam, operandTem.type};
+                                operandTem.shape.data(), quantParam, operandTem.type};
         ret = OH_NNModel_AddTensor(model, &operand);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret);
@@ -57,7 +56,7 @@ int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs)
         auto outputIndices = TransformUInt32Array(graphArgs.outputIndices[j]);
         ret = OH_NNModel_AddOperation(model, graphArgs.operationTypes[j], &paramIndices, &inputIndices,
-            &outputIndices);
+            &outputIndices);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret);
             return ret;
@@ -85,8 +84,7 @@ int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs)
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-                                operandTem.shape.data(),
-                                quantParam, operandTem.type};
+                                operandTem.shape.data(), quantParam, operandTem.type};
         ret = OH_NNModel_AddTensor(model, &operand);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret);
@@ -159,7 +157,7 @@ int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &comp
     // set cache
     if (!compileParam.cacheDir.empty()) {
         ret = OH_NNCompilation_SetCache(compilation, compileParam.cacheDir.c_str(),
-            compileParam.cacheVersion);
+            compileParam.cacheVersion);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNCompilation_SetCache failed! ret=%d\n", ret);
             return ret;
@@ -196,7 +194,7 @@ int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &comp
 
 int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
-    void *expect)
+    float* expect)
 {
     OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
     int ret = 0;
@@ -206,12 +204,12 @@ int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-            operandTem.shape.data(),
-            quantParam, operandTem.type};
+            operandTem.shape.data(),
+            quantParam, operandTem.type};
         if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) !=
             graphArgs.inputIndices.end()) {
             ret = OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data,
-                operandTem.length);
+                operandTem.length);
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] OH_NNExecutor_SetInput failed! ret=%d\n", ret);
                 return ret;
@@ -236,7 +234,7 @@ int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
     return ret;
 }
 
-int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], void *expect)
+int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], float *expect)
 {
     OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
     int ret = 0;
@@ -246,12 +244,12 @@ int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-            operandTem.shape.data(),
-            quantParam, operandTem.type};
+            operandTem.shape.data(),
+            quantParam, operandTem.type};
         if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) !=
             graphArgs.inputIndices.end()) {
             OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex,
-                operandTem.length);
+                operandTem.length);
             ret = OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory);
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] OH_NNExecutor_SetInputWithMemory failed! ret=%d\n", ret);
@@ -263,7 +261,7 @@ int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
         } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) !=
             graphArgs.outputIndices.end()) {
             OH_NN_Memory *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex,
-                operandTem.length);
+                operandTem.length);
             ret = OH_NNExecutor_SetOutputWithMemory(executor, outputIndex, outputMemory);
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] OH_NNExecutor_SetOutputWithMemory failed! ret=%d\n", ret);
@@ -273,12 +271,11 @@ int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] device set expect output failed! ret=%d\n", ret);
                 return ret;
-                }
             }
             OHNNMemory[inputIndex + outputIndex] = outputMemory;
             outputIndex += 1;
         }
     }
     ret = OH_NNExecutor_Run(executor);
     return ret;
 }
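The two helpers above differ only in how tensors are bound: `ExecuteGraphMock` passes host buffers through `OH_NNExecutor_SetInput`, while `ExecutorWithMemory` routes data through driver-shared `OH_NN_Memory` blocks. A condensed sketch of the memory-based flow, using only calls that appear in this patch (tensor setup, null checks, and memory cleanup omitted; the header path is an assumption and varies across OpenHarmony releases):

```cpp
#include <neural_network_runtime/neural_network_runtime.h>  // assumed include path

// Bind one input and one output via shared memory, then run.
// Assumes `executor`, a configured OH_NN_Tensor `operand`, and its byte
// `length` are already in hand, as in ExecutorWithMemory above.
int RunWithMemorySketch(OH_NNExecutor *executor, const OH_NN_Tensor &operand, size_t length)
{
    // Input 0 goes through driver-shared memory instead of a host buffer.
    OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, length);
    int ret = OH_NNExecutor_SetInputWithMemory(executor, 0, &operand, inputMemory);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }
    // Output 0 is bound the same way; results land in outputMemory->data.
    OH_NN_Memory *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, length);
    ret = OH_NNExecutor_SetOutputWithMemory(executor, 0, outputMemory);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }
    return OH_NNExecutor_Run(executor);
}
```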
@@ -407,6 +404,7 @@ bool CreateFolder(const std::string &path)
         return false;
     }
     LOGI("CreateFolder:%s", path.c_str());
+    mode_t mode = 0700;
     for (int i = 1; i < path.size() - 1; i++) {
         if (path[i] != '/') {
             continue;
@@ -417,14 +415,14 @@ bool CreateFolder(const std::string &path)
                 continue;
             case PathType::NOT_FOUND:
                 LOGI("mkdir: %s", path.substr(0, i).c_str());
-                mkdir(path.substr(0, i).c_str(), 0700);
+                mkdir(path.substr(0, i).c_str(), mode);
                 break;
             default:
                 LOGI("error: %s", path.substr(0, i).c_str());
                 return false;
         }
     }
-    mkdir(path.c_str(), 0700);
+    mkdir(path.c_str(), mode);
     return CheckPath(path) == PathType::DIR;
 }
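For reference, `CreateFolder` walks the path left to right and creates each missing prefix directory before creating the leaf. A self-contained sketch of the same mkdir-parents pattern, substituting plain `stat` for the test suite's `CheckPath` helper (an assumption made for portability):

```cpp
#include <string>
#include <sys/stat.h>
#include <sys/types.h>

// Create every missing directory along `path`, then the leaf itself.
bool CreateFolderSketch(const std::string &path)
{
    const mode_t mode = 0700;
    for (size_t i = 1; i + 1 < path.size(); i++) {
        if (path[i] != '/') {
            continue;                // only act at each path separator
        }
        std::string prefix = path.substr(0, i);
        struct stat st {};
        if (stat(prefix.c_str(), &st) == 0) {
            if (!S_ISDIR(st.st_mode)) {
                return false;        // a non-directory blocks the path
            }
            continue;                // this prefix already exists
        }
        if (mkdir(prefix.c_str(), mode) != 0) {
            return false;
        }
    }
    mkdir(path.c_str(), mode);       // create the leaf; it may already exist
    struct stat st {};
    return stat(path.c_str(), &st) == 0 && S_ISDIR(st.st_mode);
}
```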
......
@@ -69,13 +69,14 @@ struct OHNNCompileParam {
 int BuildSingleOpGraph(OH_NNModel *modelptr, const OHNNGraphArgs &args);
 
-int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], void* expect);
+int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[],
+    float* expect);
 
 void Free(OH_NNModel *model = nullptr, OH_NNCompilation *compilation = nullptr, OH_NNExecutor *executor = nullptr);
 
 int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam);
 
-int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, void* expect);
+int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float * expect);
 
 int SetDevice(OH_NNCompilation *compilation);
 int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs);
......
@@ -38,7 +38,8 @@ public:
     {
         DeleteFolder(CACHE_DIR);
     }
-    void GenCacheFile() {
+    void GenCacheFile()
+    {
        OH_NNModel *model = OH_NNModel_Construct();
        ASSERT_NE(nullptr, model);
        ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
@@ -53,15 +54,16 @@ public:
        ASSERT_TRUE(CheckPath(CACHE_PATH) == PathType::FILE);
        ASSERT_TRUE(CheckPath(CACHE_INFO_PATH) == PathType::FILE);
     }
-    void DestroyCache() {
+    void DestroyCache()
+    {
        std::ifstream ifs(CACHE_PATH.c_str(), std::ios::in | std::ios::binary);
        char* ptr{nullptr};
-       int cache_size = ifs.tellg();
-       int invalid_cache_size = cache_size * 0.9;
-       ifs.read(ptr, cache_size);
+       int cacheSize = ifs.tellg();
+       int invalidCacheSize = cacheSize * 0.9;
+       ifs.read(ptr, cacheSize);
        ifs.close();
        std::ofstream ofs(CACHE_PATH.c_str(), std::ios::out | std::ios::binary);
-       ofs.write(ptr, invalid_cache_size);
+       ofs.write(ptr, invalidCacheSize);
        ofs.close();
     }
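Note that the renames above do not touch two latent bugs visible in `DestroyCache`: `tellg()` is called before seeking to the end, so `cacheSize` is 0, and `ifs.read` is handed a null `ptr`. A hedged sketch of what the helper presumably intends, rewriting the cache file with only the first 90% of its bytes so that later cache loading sees a truncated, invalid cache:

```cpp
#include <fstream>
#include <string>
#include <vector>

// Truncate the file at `cachePath` to 90% of its size in place.
void CorruptCacheFile(const std::string &cachePath)
{
    std::ifstream ifs(cachePath, std::ios::in | std::ios::binary);
    ifs.seekg(0, std::ios::end);             // tellg() is 0 until we seek
    std::streamsize cacheSize = ifs.tellg();
    ifs.seekg(0, std::ios::beg);
    std::vector<char> buf(static_cast<size_t>(cacheSize));
    ifs.read(buf.data(), cacheSize);         // read into real storage, not nullptr
    ifs.close();

    std::streamsize invalidCacheSize = cacheSize * 9 / 10;
    std::ofstream ofs(cachePath, std::ios::out | std::ios::binary);
    ofs.write(buf.data(), invalidCacheSize);
}
```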
......
@@ -28,14 +28,6 @@ using namespace OHOS::HDI::Nnrt::V1_0;
 namespace {
 class ExecutorTest : public testing::Test {
 public:
-    void SetUp()
-    {
-    }
-
-    void TearDown()
-    {
-    }
-
 protected:
     OHOS::sptr<V1_0::MockIDevice> device;
     AddModel addModel;
@@ -43,7 +35,7 @@ protected:
     OHNNCompileParam compileParam;
 };
 
-void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, void* expect)
+void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float* expect)
 {
     ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, expect));
 }
......
@@ -29,14 +29,6 @@ using namespace OHOS::HDI::Nnrt::V1_0;
 namespace {
 class MemoryTest : public testing::Test {
 public:
-    void SetUp()
-    {
-    }
-
-    void TearDown()
-    {
-    }
-
 protected:
     AddModel addModel;
     OHNNGraphArgs graphArgs = addModel.graphArgs;
@@ -854,7 +846,7 @@ HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0300, Function |
     for (auto j = 0; j < graphArgs.outputIndices.size(); j++) {
         auto outputIndex = graphArgs.inputIndices.size() + j;
         // check memory output
-        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)),
+        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)),
             (float*) addModel.expectValue));
         OH_NNExecutor_DestroyOutputMemory(executor, j, &OHNNMemory[outputIndex]);
         ASSERT_EQ(OHNNMemory[outputIndex], nullptr);
@@ -898,7 +890,7 @@ HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0400, Function |
     for (auto j = 0; j < graphArgs.outputIndices.size(); j++) {
         auto outputIndex = graphArgs.inputIndices.size() + j;
         // check memory output
-        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)),
+        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)),
             (float*) avgModel.expectValue));
         OH_NNExecutor_DestroyOutputMemory(executor, j, &OHNNMemory[outputIndex]);
         ASSERT_EQ(OHNNMemory[outputIndex], nullptr);
......