MegEngine 天元 / MegEngine
Commit 7c5d8401
Authored December 22, 2021 by Megvii Engine Team

refactor(lite): refactor lite example

GitOrigin-RevId: 3eac582fb2757742a83a9d52f32246ad59b29401
Parent: aa80d988
Showing 13 changed files with 152 additions and 167 deletions (+152 / -167).
lite/example/cpp_example/CMakeLists.txt  (+2 / -0)
lite/example/cpp_example/example.h  (+6 / -43)
lite/example/cpp_example/main.cpp  (+2 / -37)
lite/example/cpp_example/mge/basic.cpp  (+74 / -61)
lite/example/cpp_example/mge/cpu_affinity.cpp  (+6 / -2)
lite/example/cpp_example/mge/cv/detect_yolox.cpp  (+9 / -4)
lite/example/cpp_example/mge/cv/picture_classification.cpp  (+5 / -3)
lite/example/cpp_example/mge/device_io.cpp  (+11 / -4)
lite/example/cpp_example/mge/lite_c_interface.cpp  (+6 / -1)
lite/example/cpp_example/mge/network_share_weights.cpp  (+8 / -2)
lite/example/cpp_example/mge/reset_io.cpp  (+10 / -3)
lite/example/cpp_example/mge/user_allocator.cpp  (+6 / -3)
lite/example/cpp_example/mge/user_cryption.cpp  (+7 / -4)
lite/example/cpp_example/CMakeLists.txt
file(GLOB_RECURSE SOURCES ./*.cpp)
add_executable(lite_examples ${SOURCES})
+target_include_directories(lite_examples PUBLIC ./)
if(LITE_BUILD_WITH_RKNPU)
    #rknn sdk1.0.0 depend on libc++_shared, use gold to remove NEEDED so symbol check
...
@@ -33,6 +34,7 @@ if(LITE_BUILD_WITH_RKNPU)
endif()
target_link_libraries(lite_examples_depends_shared lite_shared)
+target_include_directories(lite_examples_depends_shared PUBLIC ./)
if(UNIX)
    if(APPLE OR ANDROID)
...
lite/example/cpp_example/example.h
...
@@ -49,57 +49,20 @@ ExampleFuncMap* get_example_function_map();

bool register_example(std::string example_name, const ExampleFunc& fuction);

-template <int>
-struct Register;
-
-#if LITE_BUILD_WITH_MGE
-bool basic_load_from_path(const Args& args);
-bool basic_load_from_path_with_loader(const Args& args);
-bool basic_load_from_memory(const Args& args);
-bool cpu_affinity(const Args& args);
-bool network_share_same_weights(const Args& args);
-bool reset_input(const Args& args);
-bool reset_input_output(const Args& args);
-bool config_user_allocator(const Args& args);
-bool register_cryption_method(const Args& args);
-bool update_cryption_key(const Args& args);
-bool async_forward(const Args& args);
-bool set_input_callback(const Args& arg);
-bool set_output_callback(const Args& arg);
-bool picture_classification(const Args& arg);
-bool detect_yolox(const Args& arg);
-
-#if LITE_WITH_CUDA
-bool load_from_path_run_cuda(const Args& args);
-bool device_input(const Args& args);
-bool device_input_output(const Args& args);
-bool pinned_host_input(const Args& args);
-#endif
-#endif

}  // namespace example
}  // namespace lite

-#if LITE_BUILD_WITH_MGE
-bool basic_c_interface(const lite::example::Args& args);
-bool device_io_c_interface(const lite::example::Args& args);
-bool async_c_interface(const lite::example::Args& args);
-#endif

#define CONCAT_IMPL(a, b) a##b
#define MACRO_CONCAT(a, b) CONCAT_IMPL(a, b)

#define REGIST_EXAMPLE(name_, func_) REGIST_EXAMPLE_WITH_NUM(__COUNTER__, name_, func_)

-#define REGIST_EXAMPLE_WITH_NUM(number_, name_, func_)           \
-    template <>                                                  \
-    struct Register<number_> {                                   \
-        Register() { register_example(name_, func_); }           \
-    };                                                           \
-    namespace {                                                  \
-    Register<number_> MACRO_CONCAT(example_function_, number_);  \
+#define REGIST_EXAMPLE_WITH_NUM(number_, name_, func_)                        \
+    struct Register_##func_ {                                                 \
+        Register_##func_() { lite::example::register_example(name_, func_); } \
+    };                                                                        \
+    namespace {                                                               \
+    Register_##func_ MACRO_CONCAT(func_, number_);                            \
    }

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
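Note on the macro change above: the old REGIST_EXAMPLE_WITH_NUM specialized a Register<number_> template, so it only worked where that template and every example function were declared (hence all the declarations this header used to carry). The new form stamps out a uniquely named struct per registered function, so each example .cpp can self-register from its own anonymous namespace. As a rough illustration (not part of the diff), REGIST_EXAMPLE("cpu_affinity", cpu_affinity); expands to something like the following, assuming __COUNTER__ happens to yield 0 at that call site:

    // hypothetical expansion of REGIST_EXAMPLE("cpu_affinity", cpu_affinity);
    struct Register_cpu_affinity {
        // the constructor performs the registration
        Register_cpu_affinity() { lite::example::register_example("cpu_affinity", cpu_affinity); }
    };
    namespace {
    // one static object per call site; it is constructed during static initialization,
    // i.e. before main() runs, which is when register_example() gets called
    Register_cpu_affinity cpu_affinity0;  // MACRO_CONCAT(cpu_affinity, 0)
    }

This is what lets the per-example declarations above and the central registration list in main.cpp (next file) be deleted.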
lite/example/cpp_example/main.cpp
...
@@ -60,7 +60,8 @@ bool lite::example::register_example(
        std::string example_name, const ExampleFunc& fuction) {
    auto map = get_example_function_map();
    if (map->find(example_name) != map->end()) {
-        printf("Error!!! This example is registed yet\n");
+        printf("example_name: %s Error!!! This example is registed yet\n",
+               example_name.c_str());
        return false;
    }
    (*map)[example_name] = fuction;
...
@@ -142,41 +143,5 @@ int main(int argc, char** argv) {
        return -1;
    }
}

-namespace lite {
-namespace example {
-
-#if LITE_BUILD_WITH_MGE
-#if LITE_WITH_CUDA
-REGIST_EXAMPLE("load_from_path_run_cuda", load_from_path_run_cuda);
-#endif
-REGIST_EXAMPLE("basic_load_from_path", basic_load_from_path);
-REGIST_EXAMPLE("basic_load_from_path_with_loader", basic_load_from_path_with_loader);
-REGIST_EXAMPLE("basic_load_from_memory", basic_load_from_memory);
-REGIST_EXAMPLE("cpu_affinity", cpu_affinity);
-REGIST_EXAMPLE("register_cryption_method", register_cryption_method);
-REGIST_EXAMPLE("update_cryption_key", update_cryption_key);
-REGIST_EXAMPLE("network_share_same_weights", network_share_same_weights);
-REGIST_EXAMPLE("reset_input", reset_input);
-REGIST_EXAMPLE("reset_input_output", reset_input_output);
-REGIST_EXAMPLE("config_user_allocator", config_user_allocator);
-REGIST_EXAMPLE("async_forward", async_forward);
-REGIST_EXAMPLE("set_input_callback", set_input_callback);
-REGIST_EXAMPLE("set_output_callback", set_output_callback);
-REGIST_EXAMPLE("basic_c_interface", basic_c_interface);
-REGIST_EXAMPLE("device_io_c_interface", device_io_c_interface);
-REGIST_EXAMPLE("async_c_interface", async_c_interface);
-REGIST_EXAMPLE("picture_classification", picture_classification);
-REGIST_EXAMPLE("detect_yolox", detect_yolox);
-
-#if LITE_WITH_CUDA
-REGIST_EXAMPLE("device_input", device_input);
-REGIST_EXAMPLE("device_input_output", device_input_output);
-REGIST_EXAMPLE("pinned_host_input", pinned_host_input);
-#endif
-#endif
-
-}  // namespace example
-}  // namespace lite

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
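For orientation, here is a minimal, self-contained sketch of the registry-and-dispatch pattern that register_example() and main() implement. The Args fields and the argument parsing below are simplified assumptions for illustration only; the real definitions live in example.h and main.cpp and differ in detail:

    #include <cstdio>
    #include <functional>
    #include <map>
    #include <string>

    namespace lite {
    namespace example {

    struct Args {
        std::string model_path;  // assumed fields, for illustration
        std::string input_path;
    };

    using ExampleFunc = std::function<bool(const Args&)>;
    using ExampleFuncMap = std::map<std::string, ExampleFunc>;

    // One function-local static map, shared by every translation unit that registers examples.
    ExampleFuncMap* get_example_function_map() {
        static ExampleFuncMap example_map;
        return &example_map;
    }

    bool register_example(std::string example_name, const ExampleFunc& func) {
        auto map = get_example_function_map();
        if (map->find(example_name) != map->end()) {
            printf("example_name: %s is already registered\n", example_name.c_str());
            return false;
        }
        (*map)[example_name] = func;
        return true;
    }

    }  // namespace example
    }  // namespace lite

    // Dispatch: look the requested example up by name and run it with the remaining arguments.
    int main(int argc, char** argv) {
        if (argc < 2) {
            printf("usage: lite_examples <example_name> [model] [input]\n");
            return -1;
        }
        auto* map = lite::example::get_example_function_map();
        auto it = map->find(argv[1]);
        if (it == map->end()) {
            printf("unknown example: %s\n", argv[1]);
            return -1;
        }
        lite::example::Args args;
        if (argc > 2) args.model_path = argv[2];
        if (argc > 3) args.input_path = argv[3];
        return it->second(args) ? 0 : -1;
    }

With this in place, REGIST_EXAMPLE only has to drop a static registrar object into the map from each example file; main.cpp no longer needs to know the full list of examples at compile time.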
lite/example/cpp_example/mge/basic.cpp
...
@@ -10,7 +10,7 @@
 */
#include <thread>
-#include "../example.h"
+#include "example.h"
#if LITE_BUILD_WITH_MGE
#include <cstdio>
...
@@ -77,61 +77,8 @@ void output_data_info(std::shared_ptr<Network> network, size_t output_size) {
    }
}
}  // namespace

-#if LITE_WITH_CUDA
-bool lite::example::load_from_path_run_cuda(const Args& args) {
-    std::string network_path = args.model_path;
-    std::string input_path = args.input_path;
-    set_log_level(LiteLogLevel::DEBUG);
-    //! config the network running in CUDA device
-    lite::Config config{false, -1, LiteDeviceType::LITE_CUDA};
-    //! set NetworkIO
-    NetworkIO network_io;
-    std::string input_name = "img0_comp_fullface";
-    bool is_host = false;
-    IO device_input{input_name, is_host};
-    network_io.inputs.push_back(device_input);
-    //! create and load the network
-    std::shared_ptr<Network> network = std::make_shared<Network>(config, network_io);
-    network->load_model(network_path);
-    std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
-    Layout input_layout = input_tensor->get_layout();
-    //! read data from numpy data file
-    auto src_tensor = parse_npy(input_path);
-    //! malloc the device memory
-    auto tensor_device = Tensor(LiteDeviceType::LITE_CUDA, input_layout);
-    //! copy to the device memory
-    tensor_device.copy_from(*src_tensor);
-    //! Now the device memory if filled with user input data, set it to the
-    //! input tensor
-    input_tensor->reset(tensor_device.get_memory_ptr(), input_layout);
-    //! forward
-    {
-        lite::Timer ltimer("warmup");
-        network->forward();
-        network->wait();
-        ltimer.print_used_time(0);
-    }
-    lite::Timer ltimer("forward_iter");
-    for (int i = 0; i < 10; i++) {
-        ltimer.reset_start();
-        network->forward();
-        network->wait();
-        ltimer.print_used_time(i);
-    }
-    //! get the output data or read tensor set in network_in
-    size_t output_size = network->get_all_output_name().size();
-    output_info(network, output_size);
-    output_data_info(network, output_size);
-    return true;
-}
-#endif

-bool lite::example::basic_load_from_path(const Args& args) {
+namespace {
+bool basic_load_from_path(const Args& args) {
    set_log_level(LiteLogLevel::DEBUG);
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -193,7 +140,7 @@ bool lite::example::basic_load_from_path(const Args& args) {
    return true;
}

-bool lite::example::basic_load_from_path_with_loader(const Args& args) {
+bool basic_load_from_path_with_loader(const Args& args) {
    set_log_level(LiteLogLevel::DEBUG);
    lite::set_loader_lib_path(args.loader_path);
    std::string network_path = args.model_path;
...
@@ -251,7 +198,7 @@ bool lite::example::basic_load_from_path_with_loader(const Args& args) {
    return true;
}

-bool lite::example::basic_load_from_memory(const Args& args) {
+bool basic_load_from_memory(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -307,7 +254,7 @@ bool lite::example::basic_load_from_memory(const Args& args) {
    return true;
}

-bool lite::example::async_forward(const Args& args) {
+bool async_forward(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
    Config config;
...
@@ -366,7 +313,7 @@ bool lite::example::async_forward(const Args& args) {
    return true;
}

-bool lite::example::set_input_callback(const Args& args) {
+bool set_input_callback(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
    Config config;
...
@@ -433,7 +380,7 @@ bool lite::example::set_input_callback(const Args& args) {
    return true;
}

-bool lite::example::set_output_callback(const Args& args) {
+bool set_output_callback(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
    Config config;
...
@@ -500,7 +447,73 @@ bool lite::example::set_output_callback(const Args& args) {
    printf("max=%e, sum=%e\n", max, sum);
    return true;
}
+}  // namespace
+
+REGIST_EXAMPLE("basic_load_from_path", basic_load_from_path);
+REGIST_EXAMPLE("basic_load_from_path_with_loader", basic_load_from_path_with_loader);
+REGIST_EXAMPLE("basic_load_from_memory", basic_load_from_memory);
+REGIST_EXAMPLE("async_forward", async_forward);
+REGIST_EXAMPLE("set_input_callback", set_input_callback);
+REGIST_EXAMPLE("set_output_callback", set_output_callback);
+
+#if LITE_WITH_CUDA
+namespace {
+bool load_from_path_run_cuda(const Args& args) {
+    std::string network_path = args.model_path;
+    std::string input_path = args.input_path;
+    set_log_level(LiteLogLevel::DEBUG);
+    //! config the network running in CUDA device
+    lite::Config config{false, -1, LiteDeviceType::LITE_CUDA};
+    //! set NetworkIO
+    NetworkIO network_io;
+    std::string input_name = "img0_comp_fullface";
+    bool is_host = false;
+    IO device_input{input_name, is_host};
+    network_io.inputs.push_back(device_input);
+    //! create and load the network
+    std::shared_ptr<Network> network = std::make_shared<Network>(config, network_io);
+    network->load_model(network_path);
+    std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
+    Layout input_layout = input_tensor->get_layout();
+    //! read data from numpy data file
+    auto src_tensor = parse_npy(input_path);
+    //! malloc the device memory
+    auto tensor_device = Tensor(LiteDeviceType::LITE_CUDA, input_layout);
+    //! copy to the device memory
+    tensor_device.copy_from(*src_tensor);
+    //! Now the device memory if filled with user input data, set it to the
+    //! input tensor
+    input_tensor->reset(tensor_device.get_memory_ptr(), input_layout);
+    //! forward
+    {
+        lite::Timer ltimer("warmup");
+        network->forward();
+        network->wait();
+        ltimer.print_used_time(0);
+    }
+    lite::Timer ltimer("forward_iter");
+    for (int i = 0; i < 10; i++) {
+        ltimer.reset_start();
+        network->forward();
+        network->wait();
+        ltimer.print_used_time(i);
+    }
+    //! get the output data or read tensor set in network_in
+    size_t output_size = network->get_all_output_name().size();
+    output_info(network, output_size);
+    output_data_info(network, output_size);
+    return true;
+}
+}  // namespace
+
+REGIST_EXAMPLE("load_from_path_run_cuda", load_from_path_run_cuda);
+#endif

#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
lite/example/cpp_example/mge/cpu_affinity.cpp
...
@@ -9,13 +9,14 @@
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
-#include "../example.h"
+#include "example.h"
#if LITE_BUILD_WITH_MGE

using namespace lite;
using namespace example;

-bool lite::example::cpu_affinity(const Args& args) {
+namespace {
+bool cpu_affinity(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -65,6 +66,9 @@ bool lite::example::cpu_affinity(const Args& args) {
    printf("max=%e, sum=%e\n", max, sum);
    return true;
}
+}  // namespace
+
+REGIST_EXAMPLE("cpu_affinity", cpu_affinity);
#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
lite/example/cpp_example/mge/cv/detect_yolox.cpp
...
@@ -10,7 +10,7 @@
 */
#include <thread>
-#include "../../example.h"
+#include "example.h"
#if LITE_BUILD_WITH_MGE
#include <cstdio>
...
@@ -289,6 +289,10 @@ void decode_outputs(
void draw_objects(
        uint8_t* image, int width, int height, int channel,
        const std::vector<Object>& objects) {
+    (void)image;
+    (void)width;
+    (void)height;
+    (void)channel;
    for (size_t i = 0; i < objects.size(); i++) {
        const Object& obj = objects[i];
...
@@ -297,9 +301,7 @@ void draw_objects(
    }
}
-}  // namespace
-
-bool lite::example::detect_yolox(const Args& args) {
+bool detect_yolox(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -332,6 +334,9 @@ bool lite::example::detect_yolox(const Args& args) {
    stbi_image_free(image);
    return 0;
}
+}  // namespace
+
+REGIST_EXAMPLE("detect_yolox", detect_yolox);
#endif
...
lite/example/cpp_example/mge/cv/picture_classification.cpp
...
@@ -10,7 +10,7 @@
 */
#include <thread>
-#include "../../example.h"
+#include "example.h"
#if LITE_BUILD_WITH_MGE
#include <cstdio>
...
@@ -80,9 +80,8 @@ void classfication_process(
    }
    printf("output tensor sum is %f\n", sum);
}
-}  // namespace
-
-bool lite::example::picture_classification(const Args& args) {
+bool picture_classification(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -109,6 +108,9 @@ bool lite::example::picture_classification(const Args& args) {
            class_id, score);
    return 0;
}
+}  // namespace
+
+REGIST_EXAMPLE("picture_classification", picture_classification);
#endif
...
lite/example/cpp_example/mge/device_io.cpp
...
@@ -10,15 +10,17 @@
 */
#include <thread>
-#include "../example.h"
+#include "example.h"
#if LITE_BUILD_WITH_MGE
#include "misc.h"

using namespace lite;
using namespace example;

#if LITE_WITH_CUDA
-bool lite::example::device_input(const Args& args) {
+namespace {
+bool device_input(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -73,7 +75,7 @@ bool lite::example::device_input(const Args& args) {
    return true;
}

-bool lite::example::device_input_output(const Args& args) {
+bool device_input_output(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -136,7 +138,7 @@ bool lite::example::device_input_output(const Args& args) {
    return true;
}

-bool lite::example::pinned_host_input(const Args& args) {
+bool pinned_host_input(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -181,6 +183,11 @@ bool lite::example::pinned_host_input(const Args& args) {
    printf("max=%e, sum=%e\n", max, sum);
    return true;
}
+}  // namespace
+
+REGIST_EXAMPLE("device_input", device_input);
+REGIST_EXAMPLE("device_input_output", device_input_output);
+REGIST_EXAMPLE("pinned_host_input", pinned_host_input);
#endif
#endif
...
lite/example/cpp_example/mge/lite_c_interface.cpp
...
@@ -9,7 +9,7 @@
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
-#include "../example.h"
+#include "example.h"
#include "misc.h"
#if LITE_BUILD_WITH_MGE
#include "lite-c/global_c.h"
...
@@ -218,5 +218,10 @@ bool async_c_interface(const lite::example::Args& args) {
    printf("max=%e, sum=%e\n", max, sum);
    return true;
}
+
+REGIST_EXAMPLE("basic_c_interface", basic_c_interface);
+REGIST_EXAMPLE("device_io_c_interface", device_io_c_interface);
+REGIST_EXAMPLE("async_c_interface", async_c_interface);
#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
lite/example/cpp_example/mge/network_share_weights.cpp
...
@@ -9,13 +9,15 @@
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
-#include "../example.h"
+#include "example.h"
#if LITE_BUILD_WITH_MGE

using namespace lite;
using namespace example;

-bool lite::example::network_share_same_weights(const Args& args) {
+namespace {
+bool network_share_same_weights(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -75,5 +77,9 @@ bool lite::example::network_share_same_weights(const Args& args) {
    printf("max=%e, sum=%e\n", max, sum);
    return true;
}
+}  // namespace
+
+REGIST_EXAMPLE("network_share_same_weights", network_share_same_weights);
#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
lite/example/cpp_example/mge/reset_io.cpp
...
@@ -9,13 +9,15 @@
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
-#include "../example.h"
+#include "example.h"
#if LITE_BUILD_WITH_MGE

using namespace lite;
using namespace example;

-bool lite::example::reset_input(const Args& args) {
+namespace {
+bool reset_input(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
    lite::Config config;
...
@@ -53,7 +55,7 @@ bool lite::example::reset_input(const Args& args) {
    return true;
}

-bool lite::example::reset_input_output(const Args& args) {
+bool reset_input_output(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
    lite::Config config;
...
@@ -92,5 +94,10 @@ bool lite::example::reset_input_output(const Args& args) {
    printf("max=%e, sum=%e\n", max, sum);
    return true;
}
+}  // namespace
+
+REGIST_EXAMPLE("reset_input", reset_input);
+REGIST_EXAMPLE("reset_input_output", reset_input_output);
#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
lite/example/cpp_example/mge/user_allocator.cpp
...
@@ -9,7 +9,7 @@
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
-#include "../example.h"
+#include "example.h"
#if LITE_BUILD_WITH_MGE

using namespace lite;
using namespace example;
...
@@ -42,9 +42,8 @@ public:
#endif
    };
};
-}  // namespace
-
-bool lite::example::config_user_allocator(const Args& args) {
+bool config_user_allocator(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -87,5 +86,9 @@ bool lite::example::config_user_allocator(const Args& args) {
    printf("max=%e, sum=%e\n", max, sum);
    return true;
}
+}  // namespace
+
+REGIST_EXAMPLE("config_user_allocator", config_user_allocator);
#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
lite/example/cpp_example/mge/user_cryption.cpp
...
@@ -9,7 +9,7 @@
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
-#include "../example.h"
+#include "example.h"
#if LITE_BUILD_WITH_MGE

using namespace lite;
...
@@ -31,9 +31,8 @@ std::vector<uint8_t> decrypt_model(
        return {};
    }
}
-}  // namespace
-
-bool lite::example::register_cryption_method(const Args& args) {
+bool register_cryption_method(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -75,7 +74,7 @@ bool lite::example::register_cryption_method(const Args& args) {
    return true;
}

-bool lite::example::update_cryption_key(const Args& args) {
+bool update_cryption_key(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;
...
@@ -120,5 +119,9 @@ bool lite::example::update_cryption_key(const Args& args) {
    printf("max=%e, sum=%e\n", max, sum);
    return true;
}
+}  // namespace
+
+REGIST_EXAMPLE("register_cryption_method", register_cryption_method);
+REGIST_EXAMPLE("update_cryption_key", update_cryption_key);
#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
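With the per-file pattern shown above, adding a new example no longer touches example.h or main.cpp; a hypothetical new source file under lite/example/cpp_example/mge/ (the name, function, and body below are illustrative only, not part of this commit) would follow the same shape:

    #include <memory>
    #include "example.h"
    #if LITE_BUILD_WITH_MGE

    using namespace lite;
    using namespace example;

    namespace {
    //! hypothetical example: load a model from args.model_path and run one forward pass
    bool run_once(const Args& args) {
        std::shared_ptr<Network> network = std::make_shared<Network>();
        network->load_model(args.model_path);
        network->forward();
        network->wait();
        return true;
    }
    }  // namespace

    REGIST_EXAMPLE("run_once", run_once);
    #endif

    // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}

The static registrar generated by REGIST_EXAMPLE makes such an example visible to lite_examples as soon as the file is picked up by the GLOB_RECURSE in CMakeLists.txt.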