Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
OAID
Tengine
提交
fe4a1c00
T
Tengine
项目概览
OAID
/
Tengine
10 个月 前同步成功
通知
53
Star
4429
Fork
1032
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
T
Tengine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
前往新版Gitcode,体验更适合开发者的 AI 搜索 >>
未验证
提交
fe4a1c00
编写于
8月 01, 2021
作者:
reachfoo
提交者:
GitHub
8月 01, 2021
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add gather op tests; onnx.tmfile not yet available (#967)
* add gather op 2 tests
上级
b9678cd6
变更
3
隐藏空白更改
内联
并排
Showing
3 changed file
with
295 addition
and
0 deletion
+295
-0
tests/CMakeLists.txt
tests/CMakeLists.txt
+2
-0
tests/op/test_onnx_op_gather_0.cpp
tests/op/test_onnx_op_gather_0.cpp
+146
-0
tests/op/test_onnx_op_gather_1.cpp
tests/op/test_onnx_op_gather_1.cpp
+147
-0
未找到文件。
tests/CMakeLists.txt
浏览文件 @
fe4a1c00
...
...
@@ -181,6 +181,8 @@ if(PROTOBUF_FOUND)
tengine_onnx_op_test
(
test_onnx_op_exp op/test_onnx_op_exp.cpp
)
# tengine_onnx_op_test(test_onnx_op_expand_dim_unchanged op/test_onnx_op_expand_dim_unchanged.cpp)
tengine_onnx_op_test
(
test_onnx_op_floor op/test_onnx_op_floor.cpp
)
# tengine_onnx_op_test(test_onnx_op_gather_0 op/test_onnx_op_gather_0.cpp) # onnx.tmfile 有问题
# tengine_onnx_op_test(test_onnx_op_gather_1 op/test_onnx_op_gather_1.cpp) # onnx.tmfile 有问题
tengine_onnx_op_test
(
test_onnx_op_globalaveragepool op/test_onnx_op_globalaveragepool.cpp
)
tengine_onnx_op_test
(
test_onnx_op_greater op/test_onnx_op_greater.cpp
)
tengine_onnx_op_test
(
test_onnx_op_gru_defaults op/test_onnx_op_gru_defaults.cpp
)
...
...
tests/op/test_onnx_op_gather_0.cpp
0 → 100644
浏览文件 @
fe4a1c00
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: sqfu@openailab.com
*/
#include "test_onnx_op.h"
// Name of the ONNX node-test case; every fixture path below is derived from it.
std::string node = "test_gather_0";

// Protobuf tensors for the first data set of this node test:
// two inputs (data tensor and gather indices) and the reference output.
std::string input_pb_0 = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string input_pb_1 = "../onnx_node/" + node + "/test_data_set_0/input_1.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";

// Converted Tengine model file for this node test.
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
int
main
(
int
argc
,
char
*
argv
[])
{
int
h_0
=
3
;
int
w_0
=
4
;
int
h_1
=
3
;
int
w_1
=
4
;
/* set runtime options */
struct
options
opt
;
opt
.
num_thread
=
1
;
opt
.
cluster
=
TENGINE_CLUSTER_ALL
;
opt
.
precision
=
TENGINE_MODE_FP32
;
opt
.
affinity
=
0
;
/* inital tengine */
if
(
init_tengine
()
!=
0
)
{
fprintf
(
stderr
,
"Initial tengine failed.
\n
"
);
return
-
1
;
}
/* create graph, load tengine model xxx.tmfile */
graph_t
graph
=
create_graph
(
nullptr
,
"tengine"
,
model
.
c_str
());
if
(
nullptr
==
graph
)
{
fprintf
(
stderr
,
"Create graph failed.
\n
"
);
return
-
1
;
}
/* set the shape, data buffer of input_tensor of the graph */
/* input 0 */
int
input_size_0
=
h_0
*
w_0
;
int
dims
[]
=
{
h_0
,
w_0
};
std
::
vector
<
float
>
feature_in_0
(
input_size_0
);
tensor_t
input_tensor_0
=
get_graph_input_tensor
(
graph
,
0
,
0
);
if
(
input_tensor_0
==
nullptr
)
{
fprintf
(
stderr
,
"Get input tensor_0 failed
\n
"
);
return
-
1
;
}
if
(
set_tensor_shape
(
input_tensor_0
,
dims
,
2
)
<
0
)
{
fprintf
(
stderr
,
"Set input tensor_0 shape failed
\n
"
);
return
-
1
;
}
if
(
set_tensor_buffer
(
input_tensor_0
,
feature_in_0
.
data
(),
input_size_0
*
4
)
<
0
)
{
fprintf
(
stderr
,
"Set input tensor_0 buffer failed
\n
"
);
return
-
1
;
}
/* input 1 */
int
input_size_1
=
h_1
*
w_1
;
int
dims_1
[]
=
{
h_1
,
w_1
};
std
::
vector
<
float
>
feature_in_1
(
input_size_1
);
tensor_t
input_tensor_1
=
get_graph_input_tensor
(
graph
,
1
,
0
);
if
(
input_tensor_1
==
nullptr
)
{
fprintf
(
stderr
,
"Get input tensor_1 failed
\n
"
);
return
-
1
;
}
if
(
set_tensor_shape
(
input_tensor_1
,
dims_1
,
2
)
<
0
)
{
fprintf
(
stderr
,
"Set input tensor_1 shape failed
\n
"
);
return
-
1
;
}
if
(
set_tensor_buffer
(
input_tensor_1
,
feature_in_1
.
data
(),
input_size_1
*
4
)
<
0
)
{
fprintf
(
stderr
,
"Set input tensor_1 buffer failed
\n
"
);
return
-
1
;
}
/* prerun graph, set work options(num_thread, cluster, precision) */
if
(
prerun_graph_multithread
(
graph
,
opt
)
<
0
)
{
fprintf
(
stderr
,
"Prerun multithread graph failed.
\n
"
);
return
-
1
;
}
/* prepare process input data, set the data mem to input tensor */
get_pb_data
(
feature_in_0
.
data
(),
input_pb_0
);
//It's not really pass,because the type of bool 9(boolean) is not support
get_pb_data
(
feature_in_1
.
data
(),
input_pb_1
);
//Same as above
/* run graph */
if
(
run_graph
(
graph
,
1
)
<
0
)
{
fprintf
(
stderr
,
"Run graph failed
\n
"
);
return
-
1
;
}
/* get the current result of inference */
tensor_t
output_tensor
=
get_graph_output_tensor
(
graph
,
0
,
0
);
float
*
output_data
=
(
float
*
)
get_tensor_buffer
(
output_tensor
);
int
output_size
=
get_tensor_buffer_size
(
output_tensor
)
/
sizeof
(
float
);
/* get the reference result of inference */
std
::
vector
<
float
>
reference_out
(
output_size
);
get_pb_data
(
reference_out
.
data
(),
output_pb
);
/* check the result */
int
ret
=
float_mismatch
(
output_data
,
reference_out
.
data
(),
output_size
);
/* release tengine */
postrun_graph
(
graph
);
destroy_graph
(
graph
);
release_tengine
();
return
ret
;
}
tests/op/test_onnx_op_gather_1.cpp
0 → 100644
浏览文件 @
fe4a1c00
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: sqfu@openailab.com
*/
#include "test_onnx_op.h"
// Name of the ONNX node-test case; every fixture path below is derived from it.
std::string node = "test_gather_1";

// Protobuf tensors for the first data set of this node test:
// two inputs (data tensor and gather indices) and the reference output.
std::string input_pb_0 = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string input_pb_1 = "../onnx_node/" + node + "/test_data_set_0/input_1.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";

// Converted Tengine model file for this node test.
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
int
main
(
int
argc
,
char
*
argv
[])
{
int
n_0
=
5
;
int
c_0
=
4
;
int
h_0
=
3
;
int
w_0
=
2
;
int
h_1
=
3
;
/* set runtime options */
struct
options
opt
;
opt
.
num_thread
=
1
;
opt
.
cluster
=
TENGINE_CLUSTER_ALL
;
opt
.
precision
=
TENGINE_MODE_FP32
;
opt
.
affinity
=
0
;
/* inital tengine */
if
(
init_tengine
()
!=
0
)
{
fprintf
(
stderr
,
"Initial tengine failed.
\n
"
);
return
-
1
;
}
/* create graph, load tengine model xxx.tmfile */
graph_t
graph
=
create_graph
(
nullptr
,
"tengine"
,
model
.
c_str
());
if
(
nullptr
==
graph
)
{
fprintf
(
stderr
,
"Create graph failed.
\n
"
);
return
-
1
;
}
/* set the shape, data buffer of input_tensor of the graph */
/* input 0 */
int
input_size_0
=
n_0
*
c_0
*
h_0
*
w_0
;
int
dims
[]
=
{
n_0
,
c_0
,
h_0
,
w_0
};
std
::
vector
<
float
>
feature_in_0
(
input_size_0
);
tensor_t
input_tensor_0
=
get_graph_input_tensor
(
graph
,
0
,
0
);
if
(
input_tensor_0
==
nullptr
)
{
fprintf
(
stderr
,
"Get input tensor_0 failed
\n
"
);
return
-
1
;
}
if
(
set_tensor_shape
(
input_tensor_0
,
dims
,
4
)
<
0
)
{
fprintf
(
stderr
,
"Set input tensor_0 shape failed
\n
"
);
return
-
1
;
}
if
(
set_tensor_buffer
(
input_tensor_0
,
feature_in_0
.
data
(),
input_size_0
*
4
)
<
0
)
{
fprintf
(
stderr
,
"Set input tensor_0 buffer failed
\n
"
);
return
-
1
;
}
/* input 1 */
int
input_size_1
=
h_1
;
int
dims_1
[]
=
{
h_1
};
std
::
vector
<
float
>
feature_in_1
(
input_size_1
);
tensor_t
input_tensor_1
=
get_graph_input_tensor
(
graph
,
1
,
0
);
if
(
input_tensor_1
==
nullptr
)
{
fprintf
(
stderr
,
"Get input tensor_1 failed
\n
"
);
return
-
1
;
}
if
(
set_tensor_shape
(
input_tensor_1
,
dims_1
,
1
)
<
0
)
{
fprintf
(
stderr
,
"Set input tensor_1 shape failed
\n
"
);
return
-
1
;
}
if
(
set_tensor_buffer
(
input_tensor_1
,
feature_in_1
.
data
(),
input_size_1
*
4
)
<
0
)
{
fprintf
(
stderr
,
"Set input tensor_1 buffer failed
\n
"
);
return
-
1
;
}
/* prepare process input data, set the data mem to input tensor */
get_pb_data
(
feature_in_0
.
data
(),
input_pb_0
);
get_pb_data
(
feature_in_1
.
data
(),
input_pb_1
);
/* prerun graph, set work options(num_thread, cluster, precision) */
if
(
prerun_graph_multithread
(
graph
,
opt
)
<
0
)
{
fprintf
(
stderr
,
"Prerun multithread graph failed.
\n
"
);
return
-
1
;
}
/* run graph */
if
(
run_graph
(
graph
,
1
)
<
0
)
{
fprintf
(
stderr
,
"Run graph failed
\n
"
);
return
-
1
;
}
/* get the current result of inference */
tensor_t
output_tensor
=
get_graph_output_tensor
(
graph
,
0
,
0
);
float
*
output_data
=
(
float
*
)
get_tensor_buffer
(
output_tensor
);
int
output_size
=
get_tensor_buffer_size
(
output_tensor
)
/
sizeof
(
float
);
/* get the reference result of inference */
std
::
vector
<
float
>
reference_out
(
output_size
);
get_pb_data
(
reference_out
.
data
(),
output_pb
);
/* check the result */
int
ret
=
float_mismatch
(
output_data
,
reference_out
.
data
(),
output_size
);
/* release tengine */
postrun_graph
(
graph
);
destroy_graph
(
graph
);
release_tengine
();
return
ret
;
}
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录