OpenHarmony / Xts Acts
Commit 79e2f061
Authored on Nov 14, 2022 by tangshihua

Static code check

Signed-off-by: tangshihua <tangshihua@huawei.com>
Parent: 0a4563ea
Showing 7 changed files with 43 additions and 58 deletions (+43, -58):
ai/neural_network_runtime/common/mock_idevice.cpp          +9   -9
ai/neural_network_runtime/common/mock_idevice.h            +3   -3
ai/neural_network_runtime/common/nnrt_utils.cpp            +17  -19
ai/neural_network_runtime/common/nnrt_utils.h              +3   -2
ai/neural_network_runtime/interface/src/CompileTest.cpp    +8   -6
ai/neural_network_runtime/interface/src/ExecutorTest.cpp   +1   -9
ai/neural_network_runtime/interface/src/MemoryTest.cpp     +2   -10

(In several hunks below, the removed and added lines render identically in this capture; those pairs differed only in whitespace, which the page did not preserve.)
ai/neural_network_runtime/common/mock_idevice.cpp (+9 -9)

@@ -109,31 +109,31 @@ int32_t MockIDevice::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion)
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::GetSupportedOperation(const Model &model, std::vector<bool>& ops)
+int32_t MockIDevice::GetSupportedOperation(const Model &model, std::vector<bool>& ops)
 {
     ops = m_operations;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsFloat16PrecisionSupported(bool &isSupported)
+int32_t MockIDevice::IsFloat16PrecisionSupported(bool &isSupported)
 {
     isSupported = m_fp16;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsPerformanceModeSupported(bool &isSupported)
+int32_t MockIDevice::IsPerformanceModeSupported(bool &isSupported)
 {
     isSupported = m_performance;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsPrioritySupported(bool &isSupported)
+int32_t MockIDevice::IsPrioritySupported(bool &isSupported)
 {
     isSupported = m_priority;
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::IsDynamicInputSupported(bool &isSupported)
+int32_t MockIDevice::IsDynamicInputSupported(bool &isSupported)
 {
     isSupported = m_dynamic;
     return HDF_SUCCESS;

@@ -164,7 +164,7 @@ int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
     buffer.dataSize = length;
     m_ashmems[buffer.fd] = ashptr;
-    m_buffer_fd = buffer.fd;
+    m_bufferFd = buffer.fd;
     return HDF_SUCCESS;
 }

@@ -175,10 +175,10 @@ int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
     return HDF_SUCCESS;
 }
 
-int32_t MockIDevice::MemoryCopy(void *data, uint32_t length)
+int32_t MockIDevice::MemoryCopy(float *data, uint32_t length)
 {
     auto memManager = NeuralNetworkRuntime::MemoryManager::GetInstance();
-    auto memAddress = memManager->MapMemory(m_buffer_fd, length);
+    auto memAddress = memManager->MapMemory(m_bufferFd, length);
     if (memAddress == nullptr) {
         LOGE("[NNRtTest] Map fd to address failed.");
         return HDF_FAILURE;

@@ -198,7 +198,7 @@ int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config,
 }
 
 int32_t MockIDevice::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
-    sptr<IPreparedModel>& preparedModel)
+    sptr<IPreparedModel>& preparedModel)
 {
     preparedModel = new (std::nothrow) V1_0::MockIPreparedModel();
     return HDF_SUCCESS;
ai/neural_network_runtime/common/mock_idevice.h (+3 -3)

@@ -70,7 +70,7 @@ public:
     int32_t PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
         sptr<IPreparedModel>& preparedModel) override;
 
-    int32_t MemoryCopy(void *data, uint32_t length);
+    int32_t MemoryCopy(float *data, uint32_t length);
 
     void SetFP16Supported(bool isSupported);

@@ -91,7 +91,7 @@ public:
 private:
     std::unordered_map<int, sptr<Ashmem>> m_ashmems;
-    int m_buffer_fd;
+    int m_bufferFd;
     bool m_fp16 = true;
     bool m_performance = true;
     bool m_priority = true;

@@ -106,7 +106,7 @@ public:
     int32_t Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
         std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough) override;
     int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override;
 
-    MockIPreparedModel() = default;
+    MockIPreparedModel() = default;
 };
 } // namespace V1_0
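The substantive change in both mock_idevice files is the same pair of fixes: the member m_buffer_fd becomes m_bufferFd, matching the camelCase member style used by m_fp16 and m_performance, and MemoryCopy now takes float* instead of void*. The following is a minimal standalone sketch, not the repository's code, of what the typed signature buys: the compiler now rejects a buffer of the wrong element type, which is exactly the kind of issue a static coding check flags.

// Standalone sketch with a stub carrying the new typed signature.
#include <cstdint>

int32_t MemoryCopy(float *data, uint32_t length)
{
    (void)data;    // the real mock maps the shared buffer and copies into it
    (void)length;
    return 0;      // stand-in for HDF_SUCCESS
}

int main()
{
    float expect[4] = {0.0f, 1.0f, 2.0f, 3.0f};
    MemoryCopy(expect, sizeof(expect));    // OK: float* matches the parameter

    int32_t wrong[4] = {0, 1, 2, 3};
    // MemoryCopy(wrong, sizeof(wrong));   // no longer compiles: int32_t* is not float*
    (void)wrong;
    return 0;
}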
ai/neural_network_runtime/common/nnrt_utils.cpp (+17 -19)

@@ -35,8 +35,7 @@ int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs)
             const OHNNOperandTest &operandTem = graphArgs.operands[j][i];
             auto quantParam = operandTem.quantParam;
             OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-                operandTem.shape.data(),
-                quantParam, operandTem.type};
+                operandTem.shape.data(), quantParam, operandTem.type};
             ret = OH_NNModel_AddTensor(model, &operand);
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret);

@@ -57,7 +56,7 @@ int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs)
         auto outputIndices = TransformUInt32Array(graphArgs.outputIndices[j]);
         ret = OH_NNModel_AddOperation(model, graphArgs.operationTypes[j], &paramIndices, &inputIndices,
-            &outputIndices);
+            &outputIndices);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret);
             return ret;

@@ -85,8 +84,7 @@ int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs)
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-            operandTem.shape.data(),
-            quantParam, operandTem.type};
+            operandTem.shape.data(), quantParam, operandTem.type};
         ret = OH_NNModel_AddTensor(model, &operand);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret);

@@ -159,7 +157,7 @@ int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &comp
     // set cache
     if (!compileParam.cacheDir.empty()) {
         ret = OH_NNCompilation_SetCache(compilation, compileParam.cacheDir.c_str(),
-            compileParam.cacheVersion);
+            compileParam.cacheVersion);
         if (ret != OH_NN_SUCCESS) {
             LOGE("[NNRtTest] OH_NNCompilation_SetCache failed! ret=%d\n", ret);
             return ret;

@@ -196,7 +194,7 @@ int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &comp
 
 int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
-    void* expect)
+    float* expect)
 {
     OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
     int ret = 0;

@@ -206,12 +204,12 @@ int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-            operandTem.shape.data(), quantParam, operandTem.type};
+            operandTem.shape.data(), quantParam, operandTem.type};
         if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) !=
             graphArgs.inputIndices.end()) {
             ret = OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data,
-                operandTem.length);
+                operandTem.length);
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] OH_NNExecutor_SetInput failed! ret=%d\n", ret);
                 return ret;

@@ -236,7 +234,7 @@ int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
     return ret;
 }
 
-int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], void* expect)
+int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], float* expect)
 {
     OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
     int ret = 0;

@@ -246,12 +244,12 @@ int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
         const OHNNOperandTest &operandTem = graphArgs.operands[i];
         auto quantParam = operandTem.quantParam;
         OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
-            operandTem.shape.data(), quantParam, operandTem.type};
+            operandTem.shape.data(), quantParam, operandTem.type};
         if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) !=
             graphArgs.inputIndices.end()) {
             OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex,
-                operandTem.length);
+                operandTem.length);
             ret = OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory);
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] OH_NNExecutor_SetInputWithMemory failed! ret=%d\n", ret);

@@ -263,7 +261,7 @@ int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
         } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) !=
             graphArgs.outputIndices.end()) {
             OH_NN_Memory *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex,
-                operandTem.length);
+                operandTem.length);
             ret = OH_NNExecutor_SetOutputWithMemory(executor, outputIndex, outputMemory);
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] OH_NNExecutor_SetOutputWithMemory failed! ret=%d\n", ret);

@@ -273,12 +271,11 @@ int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs,
             if (ret != OH_NN_SUCCESS) {
                 LOGE("[NNRtTest] device set expect output failed! ret=%d\n", ret);
                 return ret;
             }
-            }
             OHNNMemory[inputIndex + outputIndex] = outputMemory;
             outputIndex += 1;
         }
     }
     ret = OH_NNExecutor_Run(executor);
     return ret;
 }

@@ -407,6 +404,7 @@ bool CreateFolder(const std::string &path)
         return false;
     }
     LOGI("CreateFolder:%s", path.c_str());
+    mode_t mode = 0700;
     for (int i = 1; i < path.size() - 1; i++) {
         if (path[i] != '/') {
             continue;

@@ -417,14 +415,14 @@ bool CreateFolder(const std::string &path)
                 continue;
             case PathType::NOT_FOUND:
                 LOGI("mkdir: %s", path.substr(0, i).c_str());
-                mkdir(path.substr(0, i).c_str(), 0700);
+                mkdir(path.substr(0, i).c_str(), mode);
                 break;
             default:
                 LOGI("error: %s", path.substr(0, i).c_str());
                 return false;
         }
     }
-    mkdir(path.c_str(), 0700);
+    mkdir(path.c_str(), mode);
     return CheckPath(path) == PathType::DIR;
 }
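Besides the float* signatures, nnrt_utils.cpp hoists the repeated 0700 permission literal in CreateFolder into a named mode_t used at every mkdir call. Below is a compilable standalone sketch of that segment-wise directory creation, a simplification under the assumption of POSIX mkdir and '/'-separated paths; the repository's version additionally logs each step and classifies existing paths via CheckPath.

// Standalone sketch of CreateFolder's approach, not the repo's exact code.
#include <cerrno>
#include <string>
#include <sys/stat.h>
#include <sys/types.h>

bool CreateFolderSketch(const std::string &path)
{
    if (path.empty()) {
        return false;
    }
    mode_t mode = 0700;  // named once, as in the patched code, instead of repeating the literal
    // For every '/' inside the path, create the directory prefix before it.
    for (size_t i = 1; i + 1 < path.size(); i++) {
        if (path[i] != '/') {
            continue;
        }
        if (mkdir(path.substr(0, i).c_str(), mode) != 0 && errno != EEXIST) {
            return false;
        }
    }
    // Finally create the full path itself.
    return mkdir(path.c_str(), mode) == 0 || errno == EEXIST;
}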
ai/neural_network_runtime/common/nnrt_utils.h (+3 -2)

@@ -69,13 +69,14 @@ struct OHNNCompileParam {
 int BuildSingleOpGraph(OH_NNModel *modelptr, const OHNNGraphArgs &args);
 
-int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], void* expect);
+int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[],
+    float* expect);
 
 void Free(OH_NNModel *model = nullptr, OH_NNCompilation *compilation = nullptr, OH_NNExecutor *executor = nullptr);
 
 int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam);
 
-int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, void* expect);
+int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float* expect);
 
 int SetDevice(OH_NNCompilation *compilation);
 
 int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs);
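At call sites, the header change means the expected-output buffer is passed as float* rather than void*, so the tests pass their expected arrays directly. A hedged, self-contained sketch of the call-site effect follows; every type here is a local stand-in so the sketch compiles on its own, and only the ExecuteGraphMock signature mirrors the declaration above.

// Local stand-ins, not the repository's definitions.
struct OH_NNExecutor;        // opaque handle, as in the real API
struct OHNNGraphArgs {};     // stand-in for the real struct

int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float *expect)
{
    (void)executor; (void)graphArgs; (void)expect;
    return 0;                // stand-in for OH_NN_SUCCESS
}

int main()
{
    OHNNGraphArgs graphArgs;
    float expectValue[2] = {1.0f, 2.0f};    // assumed expected outputs
    // Before this commit the parameter was void*, so any pointer was accepted;
    // with float*, a mismatched buffer is now a compile-time error.
    return ExecuteGraphMock(nullptr, graphArgs, expectValue);
}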
ai/neural_network_runtime/interface/src/CompileTest.cpp (+8 -6)

@@ -38,7 +38,8 @@ public:
     {
         DeleteFolder(CACHE_DIR);
     }
-    void GenCacheFile() {
+    void GenCacheFile()
+    {
         OH_NNModel *model = OH_NNModel_Construct();
         ASSERT_NE(nullptr, model);
         ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));

@@ -53,15 +54,16 @@ public:
         ASSERT_TRUE(CheckPath(CACHE_PATH) == PathType::FILE);
         ASSERT_TRUE(CheckPath(CACHE_INFO_PATH) == PathType::FILE);
     }
 
-    void DestroyCache() {
+    void DestroyCache()
+    {
         std::ifstream ifs(CACHE_PATH.c_str(), std::ios::in | std::ios::binary);
         char* ptr{nullptr};
-        int cache_size = ifs.tellg();
-        int invalid_cache_size = cache_size * 0.9;
-        ifs.read(ptr, cache_size);
+        int cacheSize = ifs.tellg();
+        int invalidCacheSize = cacheSize * 0.9;
+        ifs.read(ptr, cacheSize);
         ifs.close();
         std::ofstream ofs(CACHE_PATH.c_str(), std::ios::out | std::ios::binary);
-        ofs.write(ptr, invalid_cache_size);
+        ofs.write(ptr, invalidCacheSize);
         ofs.close();
     }
ai/neural_network_runtime/interface/src/ExecutorTest.cpp (+1 -9)

@@ -28,14 +28,6 @@ using namespace OHOS::HDI::Nnrt::V1_0;
 namespace {
 
 class ExecutorTest : public testing::Test {
-public:
-    void SetUp()
-    {
-    }
-    void TearDown()
-    {
-    }
-
 protected:
     OHOS::sptr<V1_0::MockIDevice> device;
     AddModel addModel;

@@ -43,7 +35,7 @@ protected:
     OHNNCompileParam compileParam;
 };
 
-void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, void* expect)
+void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float* expect)
 {
     ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, expect));
 }
ai/neural_network_runtime/interface/src/MemoryTest.cpp (+2 -10)

@@ -29,14 +29,6 @@ using namespace OHOS::HDI::Nnrt::V1_0;
 namespace {
 
 class MemoryTest : public testing::Test {
-public:
-    void SetUp()
-    {
-    }
-    void TearDown()
-    {
-    }
-
 protected:
     AddModel addModel;
     OHNNGraphArgs graphArgs = addModel.graphArgs;

@@ -854,7 +846,7 @@ HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0300, Function |
     for (auto j = 0; j < graphArgs.outputIndices.size(); j++) {
         auto outputIndex = graphArgs.inputIndices.size() + j;
         // check memory output
-        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)),
+        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)),
             (float*) addModel.expectValue));
         OH_NNExecutor_DestroyOutputMemory(executor, j, &OHNNMemory[outputIndex]);
         ASSERT_EQ(OHNNMemory[outputIndex], nullptr);

@@ -898,7 +890,7 @@ HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0400, Function |
     for (auto j = 0; j < graphArgs.outputIndices.size(); j++) {
         auto outputIndex = graphArgs.inputIndices.size() + j;
         // check memory output
-        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)),
+        EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(OHNNMemory[outputIndex]->data)),
             (float*) avgModel.expectValue));
         OH_NNExecutor_DestroyOutputMemory(executor, j, &OHNNMemory[outputIndex]);
         ASSERT_EQ(OHNNMemory[outputIndex], nullptr);