PaddlePaddle / Paddle-Lite

Commit 61efa141, authored on Sep 21, 2020 by Jiansong Wang

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle-Lite into jiansowa/img_nna

Just before commit.

Parents: afab6742, 1d3754aa

Showing 14 changed files with 88 additions and 26 deletions (+88 -26)
lite/api/CMakeLists.txt                        +0  -1
lite/api/benchmark.cc                          +0  -2
lite/api/light_api_impl.cc                     +4  -0
lite/api/model_test.cc                         +0  -2
lite/api/paddle_api_test.cc                    +0  -2
lite/core/mir/subgraph/subgraph_pass_test.cc   +0  -2
lite/kernels/bm/bridges/box_coder_op.cc        +7  -1
lite/kernels/bm/bridges/cast_op.cc             +2  -1
lite/kernels/bm/bridges/elementwise_ops.cc     +23 -4
lite/kernels/bm/bridges/multiclass_nms_op.cc   +34 -4
lite/kernels/bm/bridges/paddle_use_bridges.h   +1  -0
lite/kernels/bm/bridges/yolo_box_op.cc         +5  -5
lite/model_parser/flatbuffers/program_desc.h   +7  -0
lite/model_parser/model_parser.cc              +5  -2
lite/api/CMakeLists.txt

```diff
@@ -15,7 +15,6 @@ if ((NOT LITE_ON_TINY_PUBLISH) AND (LITE_WITH_CUDA OR LITE_WITH_X86 OR LITE_WITH
 #full api dynamic library
 lite_cc_library(paddle_full_api_shared SHARED SRCS paddle_api.cc light_api.cc cxx_api.cc cxx_api_impl.cc light_api_impl.cc
     DEPS paddle_api paddle_api_light paddle_api_full)
-target_sources(paddle_full_api_shared PUBLIC ${__lite_cc_files})
 add_dependencies(paddle_full_api_shared op_list_h kernel_list_h framework_proto op_registry fbs_headers)
 target_link_libraries(paddle_full_api_shared framework_proto op_registry)
 if (LITE_WITH_X86)
```
lite/api/benchmark.cc

```diff
@@ -30,8 +30,6 @@
 #include <string>
 #include <vector>
 #include "lite/api/paddle_api.h"
-#include "lite/api/paddle_use_kernels.h"
-#include "lite/api/paddle_use_ops.h"
 #include "lite/core/device_info.h"
 #include "lite/utils/cp_logging.h"
 #include "lite/utils/string.h"
```
lite/api/light_api_impl.cc

```diff
@@ -17,6 +17,10 @@
 #include "lite/api/paddle_api.h"
 #include "lite/core/version.h"
 #include "lite/model_parser/model_parser.h"
+#ifndef LITE_ON_TINY_PUBLISH
+#include "lite/api/paddle_use_kernels.h"
+#include "lite/api/paddle_use_ops.h"
+#endif
 
 namespace paddle {
 namespace lite {
```
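These two umbrella headers expand into force-link registration statements for every built-in op and kernel. This commit moves them out of the individual test and benchmark binaries (benchmark.cc above and model_test.cc, paddle_api_test.cc, subgraph_pass_test.cc below) into light_api_impl.cc, guarded so tiny-publish builds skip them. Below is a minimal, self-contained sketch of the force-link pattern such headers rely on; every macro and symbol name here is illustrative, not Paddle-Lite's actual API.

```cpp
#include <iostream>
#include <map>
#include <string>

// Global registry, populated by static initializers in each op's file.
std::map<std::string, bool>& Registry() {
  static std::map<std::string, bool> r;
  return r;
}

// The op's own file registers itself and exposes a "touch" function.
// Expanding USE_OP(name) in the final binary references that function,
// so the linker must keep the op's object file -- and with it the
// registration side effect.
#define REGISTER_OP(name)                                 \
  bool op_registered_##name = (Registry()[#name] = true); \
  bool* op_touch_##name() { return &op_registered_##name; }

#define USE_OP(name)              \
  extern bool* op_touch_##name(); \
  static bool* use_op_##name = op_touch_##name();

REGISTER_OP(relu)  // normally lives in the op's own .cc file
USE_OP(relu)       // normally expanded from an umbrella header

int main() {
  std::cout << "registered ops: " << Registry().size() << "\n";  // prints 1
}
```

Centralizing the headers here means binaries that link this implementation file get the registrations once, instead of each test repeating them.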
lite/api/model_test.cc

```diff
@@ -25,8 +25,6 @@
 #include "lite/core/profile/basic_profiler.h"
 #endif  // LITE_WITH_PROFILE
 #include <gflags/gflags.h>
-#include "lite/api/paddle_use_kernels.h"
-#include "lite/api/paddle_use_ops.h"
 
 using paddle::lite::profile::Timer;
```
lite/api/paddle_api_test.cc

```diff
@@ -15,8 +15,6 @@
 #include "lite/api/paddle_api.h"
 #include <gflags/gflags.h>
 #include <gtest/gtest.h>
-#include "lite/api/paddle_use_kernels.h"
-#include "lite/api/paddle_use_ops.h"
 #include "lite/utils/cp_logging.h"
 #include "lite/utils/io.h"
```
lite/core/mir/subgraph/subgraph_pass_test.cc

```diff
@@ -17,8 +17,6 @@
 #include <cmath>
 #include "lite/api/paddle_api.h"
-#include "lite/api/paddle_use_kernels.h"
-#include "lite/api/paddle_use_ops.h"
 #include "lite/api/test_helper.h"
 #include "lite/utils/cp_logging.h"
 #include "lite/utils/string.h"
```
lite/kernels/bm/bridges/box_coder_op.cc

```diff
@@ -73,10 +73,16 @@ int BoxCoderConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   if (op_info->HasAttr("variance")) {
     variance = op_info->GetAttr<std::vector<float>>("variance");
   }
+  int variance_len = variance.size();
   user_cpu_param_t bm_param;
   bm_param.op_type = USER_PADDLE_BOX_CODER;
   bm_param.u.box_coder_param.axis = axis;
-  bm_param.u.box_coder_param.variance = &variance[0];
+  CHECK_LE(variance_len, 2000);
+  memset(bm_param.u.box_coder_param.variance, 0, 2000 * sizeof(float));
+  memcpy(bm_param.u.box_coder_param.variance,
+         &variance[0],
+         variance_len * sizeof(float));
+  bm_param.u.box_coder_param.variance_len = variance_len;
   bm_param.u.box_coder_param.code_type =
       (code_type == "encode_center_size") ? 0 : 1;
   bm_param.u.box_coder_param.normalized = box_normalized;
```
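The CHECK_LE/memset pair implies that box_coder_param.variance is now a fixed float[2000] array inside user_cpu_param_t rather than a pointer; the struct itself is not part of this diff, so treat that layout as an assumption. Copying into inline storage fixes a lifetime hazard: the old code stored a pointer into the local vector `variance`, which dies when the converter returns. A standalone sketch of the pattern, with a hypothetical struct:

```cpp
#include <cassert>
#include <cstring>
#include <vector>

// Hypothetical stand-in for the BM union member; the real layout of
// user_cpu_param_t is not shown in this commit.
struct box_coder_param_t {
  float variance[2000];  // inline storage, lives as long as the struct
  int variance_len;
};

void FillVariance(box_coder_param_t* p, const std::vector<float>& variance) {
  int variance_len = static_cast<int>(variance.size());
  assert(variance_len <= 2000);  // mirrors CHECK_LE(variance_len, 2000)
  std::memset(p->variance, 0, sizeof(p->variance));
  std::memcpy(p->variance, variance.data(), variance_len * sizeof(float));
  p->variance_len = variance_len;
}

int main() {
  box_coder_param_t param;
  FillVariance(&param, {0.1f, 0.1f, 0.2f, 0.2f});
  assert(param.variance_len == 4 && param.variance[3] == 0.2f);
}
```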
lite/kernels/bm/bridges/cast_op.cc

```diff
@@ -32,7 +32,8 @@ bool CvtDtype(int dtype, int* ptype) {
       *ptype = DTYPE_INT16;
       break;
     case 2:
-      *ptype = DTYPE_FP32;
+    case 3:
+      *ptype = DTYPE_INT32;
       break;
     case 5:
       *ptype = DTYPE_FP32;
```
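The case labels suggest the incoming codes follow Paddle's proto::VarType ordering (1 = INT16, 2 = INT32, 3 = INT64, 5 = FP32); this diff does not show the enum, so that reading is an assumption. Under it, the old `case 2` mislabeled INT32 tensors as FP32, and the fix maps both 2 and 3 to DTYPE_INT32, narrowing INT64 for the BM backend. A minimal sketch of the corrected mapping, with illustrative DTYPE_* values:

```cpp
#include <cassert>

// Illustrative target-dtype constants; the real DTYPE_* values come from
// the BM SDK headers, which this commit does not show.
enum BmDtype { DTYPE_FP32 = 0, DTYPE_INT16 = 1, DTYPE_INT32 = 2 };

bool CvtDtype(int dtype, int* ptype) {
  switch (dtype) {
    case 1:
      *ptype = DTYPE_INT16;
      break;
    case 2:
    case 3:  // INT64 narrowed to INT32 for the BM backend
      *ptype = DTYPE_INT32;
      break;
    case 5:
      *ptype = DTYPE_FP32;
      break;
    default:
      return false;
  }
  return true;
}

int main() {
  int p = -1;
  assert(CvtDtype(2, &p) && p == DTYPE_INT32);  // previously yielded FP32
}
```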
lite/kernels/bm/bridges/elementwise_ops.cc

```diff
@@ -127,7 +127,7 @@ int ElementwiseConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   const float* y_data = const_cast<const float*>(y->mutable_data<float>());
   const float* x_data = const_cast<const float*>(x->mutable_data<float>());
   auto unique_op_name = lite::subgraph::bm::UniqueName("expand_ndims");
-  std::vector<int32_t> i_expand_shape_data(3);
+  std::vector<int32_t> i_expand_shape_data;
   if (x_is_const && y_is_const) {
     float* cpu_data = compute_elementwise_both_const(op);
     bm_add_const_tensor(graph->GetCompilerHandle(),
@@ -157,12 +157,31 @@ int ElementwiseConverter(void* ctx, OpLite* op, KernelBase* kernel) {
                           static_cast<const char*>(unique_op_name.c_str()));
       name[1] = static_cast<const char*>(unique_op_name.c_str());
       dim[1] = 3;
-      i_expand_shape_data[0] = i_y_shape_data[0];
-      i_expand_shape_data[1] = 1;
-      i_expand_shape_data[2] = 1;
+      i_expand_shape_data.push_back(i_y_shape_data[0]);
+      i_expand_shape_data.push_back(1);
+      i_expand_shape_data.push_back(1);
       shape[1] = &i_expand_shape_data[0];
       y_data = nullptr;
     }
+  } else {
+    if (dim[1] < dim[0]) {
+      for (size_t i = 0; i < dim[1]; i++) {
+        i_expand_shape_data.push_back(i_y_shape_data[i]);
+      }
+      for (size_t i = dim[1]; i < dim[0]; i++) {
+        i_expand_shape_data.push_back(1);
+      }
+      add_reshape_layer_v2(graph->GetCompilerHandle(),
+                           name[1],
+                           shape[1],
+                           dim[1],
+                           static_cast<const char*>(unique_op_name.c_str()),
+                           const_cast<const int*>(&i_expand_shape_data[0]),
+                           i_expand_shape_data.size());
+      dim[1] = dim[0];
+      shape[1] = &i_expand_shape_data[0];
+      name[1] = static_cast<const char*>(unique_op_name.c_str());
+    }
   }
   add_binary_layer_v2(graph->GetCompilerHandle(),
                       name[0],
```
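The first hunk stops pre-sizing i_expand_shape_data to 3 because the new else branch reuses it for tensors of any rank: when y has fewer dimensions than x, y's dimensions are copied and then right-padded with 1s up to x's rank, and a reshape layer is inserted so add_binary_layer_v2 sees two operands of equal rank. A standalone sketch of just that shape computation; the helper name is ours:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Right-pad y's shape with 1s until it matches x's rank, mirroring the
// two loops in the new else branch above.
std::vector<int32_t> ExpandRank(const std::vector<int32_t>& y_shape,
                                size_t x_rank) {
  std::vector<int32_t> expanded;
  for (size_t i = 0; i < y_shape.size(); i++) {
    expanded.push_back(y_shape[i]);
  }
  for (size_t i = y_shape.size(); i < x_rank; i++) {
    expanded.push_back(1);  // trailing broadcast dimensions
  }
  return expanded;
}

int main() {
  // A rank-1 y of shape {64} against a rank-3 x becomes {64, 1, 1}.
  assert((ExpandRank({64}, 3) == std::vector<int32_t>{64, 1, 1}));
}
```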
lite/kernels/bm/bridges/multiclass_nms_op.cc

```diff
@@ -51,7 +51,7 @@ int MultiClassNMSConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto score_threshold = op_info->GetAttr<float>("score_threshold");
   auto nms_threshold = op_info->GetAttr<float>("nms_threshold");
   auto nms_eta = op_info->GetAttr<float>("nms_eta");
-  bool normalized;
+  bool normalized = false;
   if (op_info->HasAttr("normalized")) {
     normalized = op_info->GetAttr<bool>("normalized");
   }
@@ -97,12 +97,39 @@ int MultiClassNMSConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   in_dim[1] = score_dims.size();
   in_name[0] = static_cast<const char*>(boxes_var_name.c_str());
   in_name[1] = static_cast<const char*>(score_var_name.c_str());
-  int32_t* out_shape[1];
-  int32_t out_dim[1];
-  const char* out_name[1];
+  int32_t* out_shape[2];
+  int32_t out_dim[2];
+  const char* out_name[2];
   out_shape[0] = &i_out_shape_data[0];
   out_dim[0] = out_dims.size();
   out_name[0] = static_cast<const char*>(out_var_name.c_str());
+  std::vector<int64_t> vec_index_dim(score_dims.size());
+  std::vector<int32_t> i_out_index_shape_data(score_dims.size());
+  std::string out_index_name = "";
+  if (op_type == "multiclass_nms2") {
+    output_num = 2;
+    out_index_name = op_info->Output("Index").front();
+    auto out_index =
+        scope->FindVar(out_index_name)->GetMutable<lite::Tensor>();
+    if (3 == score_dims.size()) {
+      vec_index_dim[0] = score_dims[0];
+      vec_index_dim[1] = keep_top_k;
+      vec_index_dim[2] = 1;
+    } else {
+      vec_index_dim[0] = keep_top_k;
+      vec_index_dim[1] = 1;
+    }
+    DDimLite index_dims(vec_index_dim);
+    out_index->Resize(index_dims);
+    out_index->mutable_data<float>();
+    for (size_t i = 0; i < index_dims.size(); i++) {
+      i_out_index_shape_data[i] = static_cast<int32_t>(index_dims[i]);
+    }
+    out_shape[1] = &i_out_index_shape_data[0];
+    out_dim[1] = index_dims.size();
+    out_name[1] = static_cast<const char*>(out_index_name.c_str());
+  }
   add_user_cpu_layer(graph->GetCompilerHandle(),
                      input_num,
                      in_shape,
@@ -126,3 +153,6 @@ int MultiClassNMSConverter(void* ctx, OpLite* op, KernelBase* kernel) {
 REGISTER_SUBGRAPH_BRIDGE(multiclass_nms,
                          kBM,
                          paddle::lite::subgraph::bm::MultiClassNMSConverter);
+REGISTER_SUBGRAPH_BRIDGE(multiclass_nms2,
+                         kBM,
+                         paddle::lite::subgraph::bm::MultiClassNMSConverter);
```
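multiclass_nms2 differs from multiclass_nms only in exposing a second output, Index, which is why the same converter is registered for both names and the out_shape/out_dim/out_name arrays grow from one slot to two. The Index tensor's shape rule from the new branch, extracted as a standalone sketch (the helper name is ours):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// {batch, keep_top_k, 1} for 3-D scores {N, C, M}; {keep_top_k, 1}
// otherwise -- the same two cases as the converter's if/else above.
std::vector<int64_t> IndexDims(const std::vector<int64_t>& score_dims,
                               int64_t keep_top_k) {
  if (score_dims.size() == 3) {
    return {score_dims[0], keep_top_k, 1};
  }
  return {keep_top_k, 1};
}

int main() {
  assert((IndexDims({8, 80, 1000}, 100) == std::vector<int64_t>{8, 100, 1}));
  assert((IndexDims({80, 1000}, 100) == std::vector<int64_t>{100, 1}));
}
```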
lite/kernels/bm/bridges/paddle_use_bridges.h

```diff
@@ -39,6 +39,7 @@ USE_SUBGRAPH_BRIDGE(norm, kBM);
 USE_SUBGRAPH_BRIDGE(prior_box, kBM);
 USE_SUBGRAPH_BRIDGE(box_coder, kBM);
 USE_SUBGRAPH_BRIDGE(multiclass_nms, kBM);
+USE_SUBGRAPH_BRIDGE(multiclass_nms2, kBM);
 USE_SUBGRAPH_BRIDGE(nearest_interp, kBM);
 USE_SUBGRAPH_BRIDGE(bilinear_interp, kBM);
 USE_SUBGRAPH_BRIDGE(yolo_box, kBM);
```
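USE_SUBGRAPH_BRIDGE is the consumer-side half of the force-link pattern sketched under lite/api/light_api_impl.cc above: without this line, a binary that reaches the bridge only through the registry could have the multiclass_nms2 registration stripped at link time.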
lite/kernels/bm/bridges/yolo_box_op.cc

```diff
@@ -67,17 +67,17 @@ int YoloBoxConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto downsample_ratio = op_info->GetAttr<int>("downsample_ratio");
   auto conf_thresh = op_info->GetAttr<float>("conf_thresh");
   auto anchors = op_info->GetAttr<std::vector<int>>("anchors");
-  int* anchors_buffer = static_cast<int*>(malloc(sizeof(int) * anchors.size()));
-  CHECK(anchors_buffer != nullptr);
-  memcpy(anchors_buffer, &anchors[0], sizeof(int) * anchors.size());
+  CHECK_LE(anchors.size(), 2000);
   user_cpu_param_t bm_param;
   bm_param.op_type = USER_PADDLE_YOLO_BOX;
   bm_param.u.yolo_box_param.class_num = class_num;
   bm_param.u.yolo_box_param.downsample_ratio = downsample_ratio;
   bm_param.u.yolo_box_param.conf_thresh = conf_thresh;
-  bm_param.u.yolo_box_param.anchors = anchors_buffer;
+  memset(bm_param.u.yolo_box_param.anchors, 0, 2000 * sizeof(int));
+  memcpy(bm_param.u.yolo_box_param.anchors,
+         &anchors[0],
+         anchors.size() * sizeof(int));
   bm_param.u.yolo_box_param.anchors_size = anchors.size();
-  memcpy(anchors_buffer, &anchors[0], sizeof(int) * anchors.size());
   int32_t input_num = 2;
   int32_t output_num = 2;
   int32_t* in_shape[2];
```
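As with box_coder_op.cc above, the CHECK_LE/memset/memcpy trio suggests yolo_box_param.anchors is now a fixed int[2000] array, so the same FillVariance-style inline copy applies. Dropping anchors_buffer also removes a malloc'ed buffer that was apparently never freed, along with a duplicated memcpy into it.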
lite/model_parser/flatbuffers/program_desc.h

```diff
@@ -79,6 +79,13 @@ class ProgramDescView : public ProgramDescAPI {
     return desc_->version()->version();
   }
 
+  void ClearBlocks() override {
+    CHECK_EQ(BlocksSize(), 0u) << "For backward compatibility, in the "
+                                  "read-only flatbuffers version, this "
+                                  "interface degenerates to force the number "
+                                  "of blocks to be zero.";
+  }
+
   proto::ProgramDesc const* raw_desc() const { return desc_; }
 
   const std::vector<char>& buf() const { return buf_; }
```
lite/model_parser/model_parser.cc

```diff
@@ -1000,14 +1000,17 @@ void LoadModelNaiveFromMemory(const std::string &model_buffer,
 #ifndef LITE_ON_TINY_PUBLISH
       LoadModelNaiveV0FromMemory(model_buffer, scope, cpp_prog);
 #else
-      LOG(FATAL) << "Error: Unsupported model type.";
+      LOG(FATAL) << "Paddle-Lite v2.7 has upgraded the naive-buffer model "
+                    "format. Please use the OPT to generate a new model. "
+                    "Thanks!";
 #endif
       break;
     case 1:
       LoadModelNaiveV1FromMemory(model_buffer, scope, cpp_prog);
       break;
     default:
-      LOG(FATAL) << "Error: Unsupported model type.";
+      LOG(FATAL) << "The model format cannot be recognized. Please make sure "
+                    "you use the correct interface and model file.";
       break;
   }
 }
```
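Both LOG(FATAL) messages now tell the user what to do rather than just reporting an unsupported type. For context, the surrounding switch dispatches on the model's meta version; a compact sketch of that dispatch with the new messages, where the function and type names are ours and std::runtime_error stands in for LOG(FATAL):

```cpp
#include <cstdint>
#include <stdexcept>
#include <string>

void LoadV0(const std::string&) { /* naive-buffer path */ }
void LoadV1(const std::string&) { /* flatbuffers path */ }

// Dispatch on the meta version read from the model header.
void LoadByMetaVersion(uint16_t meta_version, const std::string& buf) {
  switch (meta_version) {
    case 0:
#ifndef LITE_ON_TINY_PUBLISH
      LoadV0(buf);
#else
      throw std::runtime_error(
          "Paddle-Lite v2.7 has upgraded the naive-buffer model format. "
          "Please use the OPT to generate a new model.");
#endif
      break;
    case 1:
      LoadV1(buf);
      break;
    default:
      throw std::runtime_error(
          "The model format cannot be recognized. Please make sure you use "
          "the correct interface and model file.");
  }
}

int main() { LoadByMetaVersion(1, ""); }
```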