PaddlePaddle / Paddle-Lite
Unverified commit 2997b937, authored on Apr 15, 2020 by MaxwellDing, committed via GitHub on Apr 15, 2020.

refactor(*): reduce Wsign-compare warning (#3391)

Parent: 3d2a99e7
Showing 65 changed files with 164 additions and 162 deletions (+164, -162).
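Nearly every hunk below applies the same mechanical fix: -Wsign-compare fires when a signed value is compared with an unsigned one, most often an int loop index tested against a container's unsigned size(). A minimal standalone sketch of the warning and of the fix used throughout this commit (generic C++, not Paddle-Lite code):

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> v = {1, 2, 3};
  // Warns under -Wall (-Wsign-compare): v.size() returns the unsigned
  // std::size_t, while i is a signed int.
  //   for (int i = 0; i < v.size(); i++) { ... }
  // The fix: give the index the same unsigned type as the bound.
  for (size_t i = 0; i < v.size(); i++) {
    std::printf("%d\n", v[i]);
  }
  return 0;
}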
lite/api/light_api.cc  +4 -4
lite/api/light_api_test.cc  +2 -2
lite/api/lite_multithread_test.cc  +2 -2
lite/api/model_test.cc  +1 -1
lite/api/model_test_classify.cc  +1 -1
lite/api/model_test_detection.cc  +2 -2
lite/api/paddle_api_test.cc  +2 -2
lite/api/test_googlenet_lite.cc  +2 -2
lite/api/test_inceptionv4_lite_x86.cc  +4 -4
lite/api/test_mobilenetv1_lite_x86.cc  +4 -4
lite/api/test_mobilenetv2_lite_x86.cc  +4 -4
lite/api/test_resnet50_lite_x86.cc  +4 -4
lite/api/transform_test.cc  +2 -2
lite/backends/x86/jit/gen/matmul.cc  +1 -1
lite/backends/x86/math/beam_search.cc  +1 -1
lite/backends/x86/math/blas.cc  +1 -1
lite/backends/x86/math/sequence_pooling.cc  +7 -7
lite/core/arena/framework.cc  +2 -2
lite/core/arena/framework.h  +1 -1
lite/core/device_info.cc  +1 -1
lite/core/kernel.cc  +1 -1
lite/core/mir/fusion/conv_bn_fuser.cc  +10 -10
lite/core/mir/fusion/quant_dequant_op_fuser.cc  +1 -1
lite/core/mir/mlu_postprocess_pass.cc  +2 -2
lite/core/mir/subgraph/subgraph_detector.cc  +4 -3
lite/core/mir/subgraph/subgraph_detector_test.cc  +5 -5
lite/core/mir/subgraph/subgraph_pass_test.cc  +3 -3
lite/core/op_lite.cc  +4 -4
lite/core/program.cc  +1 -1
lite/core/tensor.cc  +1 -1
lite/kernels/mlu/bridges/act_op_test.cc  +8 -8
lite/kernels/mlu/bridges/concat_op_test.cc  +3 -3
lite/kernels/mlu/bridges/conv_op.cc  +6 -6
lite/kernels/mlu/bridges/conv_op_test.cc  +2 -2
lite/kernels/mlu/bridges/fc_op_test.cc  +2 -2
lite/kernels/mlu/bridges/interpolate_op.cc  +1 -1
lite/kernels/mlu/bridges/interpolate_op_test.cc  +1 -1
lite/kernels/mlu/bridges/utility.cc  +3 -3
lite/kernels/npu/bridges/engine.cc  +3 -3
lite/kernels/x86/elementwise_op_function.h  +7 -7
lite/kernels/x86/fill_constant_batch_size_like_compute_test.cc  +1 -1
lite/kernels/x86/gather_compute.h  +1 -1
lite/kernels/x86/layer_norm_compute_test.cc  +1 -1
lite/kernels/x86/sequence_expand_as_compute.h  +2 -2
lite/kernels/x86/sequence_reverse_compute_test.cc  +1 -1
lite/kernels/x86/shape_compute.h  +1 -1
lite/kernels/x86/slice_compute.h  +1 -1
lite/kernels/x86/slice_compute_test.cc  +6 -6
lite/kernels/x86/stack_compute.h  +1 -1
lite/kernels/x86/var_conv_2d_compute.h  +1 -1
lite/model_parser/model_parser_test.cc  +1 -1
lite/operators/elementwise_ops.cc  +7 -6
lite/operators/expand_op.cc  +1 -1
lite/operators/fill_constant_batch_size_like_op.cc  +1 -1
lite/operators/fill_constant_op.cc  +1 -1
lite/operators/flatten_op.cc  +1 -1
lite/operators/interpolate_op.cc  +2 -2
lite/operators/pool_op.h  +1 -1
lite/operators/reduce_mean_op.cc  +3 -3
lite/operators/reshape_op.cc  +2 -2
lite/operators/search_fc_op.cc  +3 -3
lite/operators/slice_op.cc  +5 -5
lite/operators/split_op.cc  +1 -1
lite/operators/squeeze_op.cc  +2 -2
lite/operators/unsqueeze_op.cc  +1 -1
lite/api/light_api.cc

@@ -82,7 +82,7 @@ Tensor* LightPredictor::GetInputByName(const std::string& name) {
   if (element == input_names_.end()) {
     LOG(ERROR) << "Model do not have input named with: [" << name
                << "], model's inputs include:";
-    for (int i = 0; i < input_names_.size(); i++) {
+    for (size_t i = 0; i < input_names_.size(); i++) {
       LOG(ERROR) << "[" << input_names_[i] << "]";
     }
     return nullptr;
@@ -114,7 +114,7 @@ void LightPredictor::PrepareFeedFetch() {
   auto current_block = cpp_program_desc_.GetBlock<cpp::BlockDesc>(0);
   std::vector<cpp::OpDesc*> feeds;
   std::vector<cpp::OpDesc*> fetchs;
-  for (int i = 0; i < current_block->OpsSize(); i++) {
+  for (size_t i = 0; i < current_block->OpsSize(); i++) {
     auto op = current_block->GetOp<cpp::OpDesc>(i);
     if (op->Type() == "feed") {
       feeds.push_back(op);
@@ -124,11 +124,11 @@ void LightPredictor::PrepareFeedFetch() {
   }
   input_names_.resize(feeds.size());
   output_names_.resize(fetchs.size());
-  for (int i = 0; i < feeds.size(); i++) {
+  for (size_t i = 0; i < feeds.size(); i++) {
     input_names_[feeds[i]->GetAttr<int>("col")] = feeds[i]->Output("Out").front();
   }
-  for (int i = 0; i < fetchs.size(); i++) {
+  for (size_t i = 0; i < fetchs.size(); i++) {
     output_names_[fetchs[i]->GetAttr<int>("col")] = fetchs[i]->Input("X").front();
   }
lite/api/light_api_test.cc

@@ -37,11 +37,11 @@ TEST(LightAPI, load) {
   const std::vector<std::string> inputs = predictor.GetInputNames();
   LOG(INFO) << "input size: " << inputs.size();
-  for (int i = 0; i < inputs.size(); i++) {
+  for (size_t i = 0; i < inputs.size(); i++) {
     LOG(INFO) << "inputnames: " << inputs[i];
   }
   const std::vector<std::string> outputs = predictor.GetOutputNames();
-  for (int i = 0; i < outputs.size(); i++) {
+  for (size_t i = 0; i < outputs.size(); i++) {
     LOG(INFO) << "outputnames: " << outputs[i];
   }
lite/api/lite_multithread_test.cc

@@ -293,13 +293,13 @@ int main(int argc, char** argv) {
   std::vector<std::string> str_input_shapes = split_string(FLAGS_input_shape);
   std::vector<std::vector<int64_t>> input_shapes;
-  for (int i = 0; i < str_input_shapes.size(); ++i) {
+  for (size_t i = 0; i < str_input_shapes.size(); ++i) {
     input_shapes.push_back(get_shape(str_input_shapes[i]));
   }
   std::vector<std::string> str_input_shapes_0 =
       split_string(FLAGS_input_shape_0);
   std::vector<std::vector<int64_t>> input_shapes_0;
-  for (int i = 0; i < str_input_shapes_0.size(); ++i) {
+  for (size_t i = 0; i < str_input_shapes_0.size(); ++i) {
     input_shapes_0.push_back(get_shape(str_input_shapes_0[i]));
   }
lite/api/model_test.cc

@@ -204,7 +204,7 @@ int main(int argc, char** argv) {
   LOG(INFO) << "input shapes: " << FLAGS_input_shape;
   std::vector<std::string> str_input_shapes = split_string(FLAGS_input_shape);
   std::vector<std::vector<int64_t>> input_shapes;
-  for (int i = 0; i < str_input_shapes.size(); ++i) {
+  for (size_t i = 0; i < str_input_shapes.size(); ++i) {
     LOG(INFO) << "input shape: " << str_input_shapes[i];
     input_shapes.push_back(get_shape(str_input_shapes[i]));
   }
lite/api/model_test_classify.cc

@@ -310,7 +310,7 @@ int main(int argc, char** argv) {
   LOG(INFO) << "input shapes: " << FLAGS_input_shape;
   std::vector<std::string> str_input_shapes = split_string(FLAGS_input_shape);
   std::vector<std::vector<int64_t>> input_shapes;
-  for (int i = 0; i < str_input_shapes.size(); ++i) {
+  for (size_t i = 0; i < str_input_shapes.size(); ++i) {
     LOG(INFO) << "input shape: " << str_input_shapes[i];
     input_shapes.push_back(get_shape(str_input_shapes[i]));
   }
lite/api/model_test_detection.cc

@@ -114,7 +114,7 @@ void detect_object(const float* dout,
   }
   std::string name = FLAGS_out_txt + "_accu.txt";
   FILE* fp = fopen(name.c_str(), "w");
-  for (int i = 0; i < objects.size(); ++i) {
+  for (size_t i = 0; i < objects.size(); ++i) {
     Object object = objects.at(i);
     if (object.prob > thresh && object.x > 0 && object.y > 0 &&
         object.width > 0 && object.height > 0) {
@@ -324,7 +324,7 @@ int main(int argc, char** argv) {
   LOG(INFO) << "input shapes: " << FLAGS_input_shape;
   std::vector<std::string> str_input_shapes = split_string(FLAGS_input_shape);
   std::vector<std::vector<int64_t>> input_shapes;
-  for (int i = 0; i < str_input_shapes.size(); ++i) {
+  for (size_t i = 0; i < str_input_shapes.size(); ++i) {
     LOG(INFO) << "input shape: " << str_input_shapes[i];
     input_shapes.push_back(get_shape(str_input_shapes[i]));
   }
lite/api/paddle_api_test.cc

@@ -36,11 +36,11 @@ TEST(CxxApi, run) {
   auto inputs = predictor->GetInputNames();
   LOG(INFO) << "input size: " << inputs.size();
-  for (int i = 0; i < inputs.size(); i++) {
+  for (size_t i = 0; i < inputs.size(); i++) {
     LOG(INFO) << "inputnames: " << inputs[i];
   }
   auto outputs = predictor->GetOutputNames();
-  for (int i = 0; i < outputs.size(); i++) {
+  for (size_t i = 0; i < outputs.size(); i++) {
     LOG(INFO) << "outputnames: " << outputs[i];
   }
   auto input_tensor = predictor->GetInputByName(inputs[0]);
lite/api/test_googlenet_lite.cc

@@ -38,7 +38,7 @@ TEST(CXXApi, test_lite_googlenet) {
   input_tensor->Resize(input_shape);
   auto* data = input_tensor->mutable_data<float>();
   int input_num = 1;
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     input_num *= input_shape[i];
   }
   for (int i = 0; i < input_num; i++) {
@@ -69,7 +69,7 @@ TEST(CXXApi, test_lite_googlenet) {
   for (size_t i = 0; i < results.size(); ++i) {
     EXPECT_NEAR(out->data<float>()[i * 51], results[i], 1e-5);
   }
-  ASSERT_EQ(out->shape().size(), 2);
+  ASSERT_EQ(out->shape().size(), 2u);
   ASSERT_EQ(out->shape()[0], 1);
   ASSERT_EQ(out->shape()[1], 1000);
 }
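The change from 2 to 2u above is the companion fix for comparisons against literals: ASSERT_EQ (like the CHECK_* and PADDLE_ENFORCE_* macros in later files) compares its two arguments with a templated ==, so an unsigned size() on one side and a plain int literal on the other reproduces the same warning inside the macro expansion. A small gtest-style sketch (a hypothetical test, not one from the repo):

#include <cstdint>
#include <gtest/gtest.h>
#include <vector>

TEST(SignCompare, UnsignedLiteral) {
  std::vector<int64_t> shape = {1, 1000};
  // ASSERT_EQ(shape.size(), 2);  // size_t vs. int literal: -Wsign-compare
  ASSERT_EQ(shape.size(), 2u);    // unsigned literal matches size_t
}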
lite/api/test_inceptionv4_lite_x86.cc

@@ -38,7 +38,7 @@ TEST(InceptionV4, test_inceptionv4_lite_x86) {
   input_tensor->Resize(input_shape);
   auto* data = input_tensor->mutable_data<float>();
   int input_num = 1;
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     input_num *= input_shape[i];
   }
   for (int i = 0; i < input_num; i++) {
@@ -69,13 +69,13 @@ TEST(InceptionV4, test_inceptionv4_lite_x86) {
        0.0010612885, 0.00089107914, 0.0010112736, 0.00097655767}));
   auto out = predictor->GetOutput(0);
-  ASSERT_EQ(out->shape().size(), 2);
+  ASSERT_EQ(out->shape().size(), 2u);
   ASSERT_EQ(out->shape()[0], 1);
   ASSERT_EQ(out->shape()[1], 1000);
   int step = 50;
-  for (int i = 0; i < results.size(); ++i) {
-    for (int j = 0; j < results[i].size(); ++j) {
+  for (size_t i = 0; i < results.size(); ++i) {
+    for (size_t j = 0; j < results[i].size(); ++j) {
       EXPECT_NEAR(out->data<float>()[j * step + (out->shape()[1] * i)],
                   results[i][j],
                   1e-6);
lite/api/test_mobilenetv1_lite_x86.cc

@@ -38,7 +38,7 @@ TEST(Mobilenet_v1, test_mobilenetv1_lite_x86) {
   input_tensor->Resize(input_shape);
   auto* data = input_tensor->mutable_data<float>();
   int input_num = 1;
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     input_num *= input_shape[i];
   }
   for (int i = 0; i < input_num; i++) {
@@ -68,13 +68,13 @@ TEST(Mobilenet_v1, test_mobilenetv1_lite_x86) {
        0.0048292773, 0.0013995157, 0.0018453331, 0.0002428986,
        0.00020211363, 0.00013668182, 0.0005855956, 0.00025901722}));
   auto out = predictor->GetOutput(0);
-  ASSERT_EQ(out->shape().size(), 2);
+  ASSERT_EQ(out->shape().size(), 2u);
   ASSERT_EQ(out->shape()[0], 1);
   ASSERT_EQ(out->shape()[1], 1000);
   int step = 50;
-  for (int i = 0; i < results.size(); ++i) {
-    for (int j = 0; j < results[i].size(); ++j) {
+  for (size_t i = 0; i < results.size(); ++i) {
+    for (size_t j = 0; j < results[i].size(); ++j) {
      EXPECT_NEAR(out->data<float>()[j * step + (out->shape()[1] * i)],
                  results[i][j],
                  1e-6);
lite/api/test_mobilenetv2_lite_x86.cc

@@ -39,7 +39,7 @@ TEST(Mobilenet_v2, test_mobilenetv2_lite_x86) {
   input_tensor->Resize(input_shape);
   auto* data = input_tensor->mutable_data<float>();
   int input_num = 1;
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     input_num *= input_shape[i];
   }
   for (int i = 0; i < input_num; i++) {
@@ -69,13 +69,13 @@ TEST(Mobilenet_v2, test_mobilenetv2_lite_x86) {
        0.0070957416, 0.0016094646, 0.0018807327, 0.00010506048,
        6.823785e-05, 0.00012269315, 0.0007806194, 0.00022354358}));
   auto out = predictor->GetOutput(0);
-  ASSERT_EQ(out->shape().size(), 2);
+  ASSERT_EQ(out->shape().size(), 2u);
   ASSERT_EQ(out->shape()[0], 1);
   ASSERT_EQ(out->shape()[1], 1000);
   int step = 50;
-  for (int i = 0; i < results.size(); ++i) {
-    for (int j = 0; j < results[i].size(); ++j) {
+  for (size_t i = 0; i < results.size(); ++i) {
+    for (size_t j = 0; j < results[i].size(); ++j) {
       EXPECT_NEAR(out->data<float>()[j * step + (out->shape()[1] * i)],
                   results[i][j],
                   1e-6);
lite/api/test_resnet50_lite_x86.cc

@@ -38,7 +38,7 @@ TEST(Resnet50, test_resnet50_lite_x86) {
   input_tensor->Resize(input_shape);
   auto* data = input_tensor->mutable_data<float>();
   int input_num = 1;
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     input_num *= input_shape[i];
   }
   for (int i = 0; i < input_num; i++) {
@@ -69,13 +69,13 @@ TEST(Resnet50, test_resnet50_lite_x86) {
        0.006387163, 0.0037145028, 0.0012812682, 0.00045948103,
        0.00013535398, 0.0002483765, 0.00076759676, 0.0002773295}));
   auto out = predictor->GetOutput(0);
-  ASSERT_EQ(out->shape().size(), 2);
+  ASSERT_EQ(out->shape().size(), 2u);
   ASSERT_EQ(out->shape()[0], 1);
   ASSERT_EQ(out->shape()[1], 1000);
   int step = 50;
-  for (int i = 0; i < results.size(); ++i) {
-    for (int j = 0; j < results[i].size(); ++j) {
+  for (size_t i = 0; i < results.size(); ++i) {
+    for (size_t j = 0; j < results[i].size(); ++j) {
       EXPECT_NEAR(out->data<float>()[j * step + (out->shape()[1] * i)],
                   results[i][j],
                   1e-6);
lite/api/transform_test.cc

@@ -232,8 +232,8 @@ void TestModel(const std::vector<Place>& valid_places,
   for (int i = 0; i < outs->numel(); ++i) {
     LOG(INFO) << o_data[i];
   }
-  for (int i = 0; i < lod.size(); ++i) {
-    for (int j = 0; j < lod[i].size(); ++j) {
+  for (size_t i = 0; i < lod.size(); ++i) {
+    for (size_t j = 0; j < lod[i].size(); ++j) {
       LOG(INFO) << lod[i][j];
     }
   }
lite/backends/x86/jit/gen/matmul.cc

@@ -40,7 +40,7 @@ void MatMulJitCode::genCode() {
   for (size_t g = 0; g < groups.size(); ++g) {
     size_t x_offset = 0;
     size_t wgt_offset_tmp = 0;
-    for (int i = 0; i < g; ++i) {
+    for (size_t i = 0; i < g; ++i) {
       wgt_offset_tmp += groups[i] * block_len;
     }
     for (int k = 0; k < k_; ++k) {
lite/backends/x86/math/beam_search.cc

@@ -265,7 +265,7 @@ class BeamSearchFunctor<TARGET(kX86), T> {
     // size_t num_seqs = scores->NumElements(lod_level);
     size_t num_seqs = scores->lod()[lod_level].size() - 1;
     size_t seq_width = 1;
-    for (int i = 1; i < scores->dims().size(); i++) {
+    for (size_t i = 1; i < scores->dims().size(); i++) {
       seq_width *= scores->dims()[i];
     }
lite/backends/x86/math/blas.cc

@@ -23,7 +23,7 @@ namespace math {
 MatDescriptor CreateMatrixDescriptor(const lite::DDimLite &tensor_dim,
                                      int num_flatten_cols,
                                      bool trans) {
-  PADDLE_ENFORCE_GT(tensor_dim.size(), 1);
+  PADDLE_ENFORCE_GT(tensor_dim.size(), 1u);
   MatDescriptor retv;
   if (num_flatten_cols > 1) {
     auto flatten_dim = tensor_dim.Flatten2D(num_flatten_cols);
lite/backends/x86/math/sequence_pooling.cc

@@ -46,9 +46,9 @@ class MaxSeqPoolFunctor {
     auto in_dims = input.dims();
     auto out_dims = output->dims();
     auto idx_dims = index->dims();
-    PADDLE_ENFORCE_GT(in_dims.size(), 1);
-    PADDLE_ENFORCE_GT(out_dims.size(), 1);
-    for (int64_t i = 1; i < in_dims.size(); ++i) {
+    PADDLE_ENFORCE_GT(in_dims.size(), 1u);
+    PADDLE_ENFORCE_GT(out_dims.size(), 1u);
+    for (size_t i = 1; i < in_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]);
     }
     PADDLE_ENFORCE_EQ(idx_dims, out_dims);
@@ -95,9 +95,9 @@ class MaxSeqPoolFunctor<T, true> {
                   lite::Tensor* index) {
     auto in_dims = input.dims();
     auto out_dims = output->dims();
-    PADDLE_ENFORCE_GT(in_dims.size(), 1);
-    PADDLE_ENFORCE_GT(out_dims.size(), 1);
-    for (int64_t i = 1; i < in_dims.size(); ++i) {
+    PADDLE_ENFORCE_GT(in_dims.size(), 1u);
+    PADDLE_ENFORCE_GT(out_dims.size(), 1u);
+    for (size_t i = 1; i < in_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]);
     }
@@ -138,7 +138,7 @@ class MaxSeqPoolGradFunctor {
     auto idx_dims = index.dims();
     PADDLE_ENFORCE_GT(og_dims.size(), 1);
     PADDLE_ENFORCE_GT(ig_dims.size(), 1);
-    for (int64_t i = 1; i < og_dims.size(); ++i) {
+    for (size_t i = 1; i < og_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i]);
     }
     PADDLE_ENFORCE_EQ(idx_dims, og_dims);
lite/core/arena/framework.cc

@@ -107,7 +107,7 @@ void TestCase::PrepareInputsForInstruction() {
       CHECK(!shared_tensor_array->empty()) << "shared_tensor_array is empty yet";
       target_tensor_array->resize(shared_tensor_array->size());
-      for (int i = 0; i < shared_tensor_array->size(); i++) {
+      for (size_t i = 0; i < shared_tensor_array->size(); i++) {
         target_tensor_array->at(i).Resize(shared_tensor_array->at(i).dims());
         TargetCopy(param_type->type->target(),
@@ -219,7 +219,7 @@ bool TestCase::CheckPrecision(const std::string& var_name,
     auto b_tensor_array =
         base_scope_->FindVar(var_name)->GetMutable<std::vector<Tensor>>();
     CHECK_EQ(a_tensor_array->size(), b_tensor_array->size());
-    for (int i = 0; i < a_tensor_array->size(); i++) {
+    for (size_t i = 0; i < a_tensor_array->size(); i++) {
       Tensor* a_tensor = &(a_tensor_array->at(i));
       Tensor* b_tensor = &(b_tensor_array->at(i));
       if (a_tensor->dims().size() == 0 && b_tensor->dims().size() == 0) {
lite/core/arena/framework.h

@@ -166,7 +166,7 @@ class TestCase {
   // TODO(Superjomn) Move this method to utils or DDim?
   bool ShapeEquals(const DDim& a, const DDim& b) {
     if (a.size() != b.size()) return false;
-    for (int i = 0; i < a.size(); i++) {
+    for (size_t i = 0; i < a.size(); i++) {
       if (a[i] != b[i]) return false;
     }
     return true;
lite/core/device_info.cc

@@ -947,7 +947,7 @@ void DeviceInfo::RequestPowerNoBindMode(int thread_num) {
     active_ids_ = core_ids_;
   } else {
     active_ids_.resize(thread_num);
-    for (int i = 0; i < thread_num; ++i) {
+    for (uint32_t i = 0; i < thread_num; ++i) {
       if (i < big_core_ids_.size()) {
         active_ids_[i] = big_core_ids_[i];
       } else {
lite/core/kernel.cc

@@ -57,7 +57,7 @@ void KernelBase::ParseKernelType(const std::string &kernel_type,
                                  std::string *alias,
                                  Place *place) {
   auto parts = Split(kernel_type, "/");
-  CHECK_EQ(parts.size(), 5);
+  CHECK_EQ(parts.size(), 5u);
   *op_type = parts[0];
   *alias = parts[1];
lite/core/mir/fusion/conv_bn_fuser.cc

@@ -163,23 +163,23 @@ void ConvBNFuser::InsertNewNode(SSAGraph* graph, const key2nodes_t& matched) {
       int c_size = conv_weight_t->dims()[1] * conv_weight_t->dims()[2] *
                    conv_weight_t->dims()[3];
       int hw = conv_weight_t->dims()[2] * conv_weight_t->dims()[3];
-      for (unsigned int k = 0; k < conv_weight_t->dims()[0]; ++k) {
-        for (unsigned int i = 0; i < h; ++i) {
+      for (int k = 0; k < conv_weight_t->dims()[0]; ++k) {
+        for (int i = 0; i < h; ++i) {
           weight_scale[i] *= fabsf(alpha_data[i]);
           if (alpha_data[i] < 0.f) {
             auto ptr_row = conv_weight_d + k * c_size + i * hw;
-            for (unsigned int j = 0; j < hw; ++j) {
+            for (int j = 0; j < hw; ++j) {
               ptr_row[j] *= -1;
             }
           }
         }
       }
     } else {
-      for (unsigned int i = 0; i < h; ++i) {
+      for (int i = 0; i < h; ++i) {
         weight_scale[i] *= fabsf(alpha_data[i]);
         if (alpha_data[i] < 0.f) {
           auto ptr_row = conv_weight_d + i * w;
-          for (unsigned int j = 0; j < w; ++j) {
+          for (int j = 0; j < w; ++j) {
            ptr_row[j] *= -1;
          }
        }
@@ -203,17 +203,17 @@ void ConvBNFuser::InsertNewNode(SSAGraph* graph, const key2nodes_t& matched) {
       int c_size = conv_weight_t->dims()[1] * conv_weight_t->dims()[2] *
                    conv_weight_t->dims()[3];
       int hw = conv_weight_t->dims()[2] * conv_weight_t->dims()[3];
-      for (unsigned int k = 0; k < conv_weight_t->dims()[0]; ++k) {
-        for (unsigned int i = 0; i < h; ++i) {
+      for (int k = 0; k < conv_weight_t->dims()[0]; ++k) {
+        for (int i = 0; i < h; ++i) {
           auto ptr_row = conv_weight_d + k * c_size + i * hw;
-          for (unsigned int j = 0; j < hw; ++j) {
+          for (int j = 0; j < hw; ++j) {
             ptr_row[j] *= alpha_data[i];
           }
         }
       }
     } else {
-      for (unsigned int i = 0; i < h; ++i) {    // n: conv2d output channels
-        for (unsigned int j = 0; j < w; ++j) {  // w: conv2d input channels
+      for (int i = 0; i < h; ++i) {    // n: conv2d output channels
+        for (int j = 0; j < w; ++j) {  // w: conv2d input channels
           conv_weight_d[i * w + j] *= alpha_data[i];
         }
       }
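This file runs in the opposite direction from most of the commit: the indices were unsigned int, but the bounds here (conv_weight_t->dims()[0], h, hw, w) are signed, so the warning goes away by making the indices signed instead. A reduced sketch of that direction of the fix (a hypothetical helper, not the fuser itself):

// When the loop bound is signed (here int row/column counts), a signed
// index is the consistent choice; an unsigned index would warn.
void scale_rows(float* data, int h, int w, const float* alpha) {
  for (int i = 0; i < h; ++i) {    // was: unsigned int i
    for (int j = 0; j < w; ++j) {  // was: unsigned int j
      data[i * w + j] *= alpha[i];
    }
  }
}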
lite/core/mir/fusion/quant_dequant_op_fuser.cc

@@ -260,7 +260,7 @@ void ChannelWiseDequantOpFuser::InsertNewNode(SSAGraph* graph,
   auto channel_scale_tensor =
       scope->FindVar(channel_scale_name)->GetMutable<lite::Tensor>();
   auto* channel_scale_data = channel_scale_tensor->data<float>();
-  for (int i = 0; i < channel_scale_tensor->data_size(); i++) {
+  for (size_t i = 0; i < channel_scale_tensor->data_size(); i++) {
     weight_scale.push_back(channel_scale_data[i] / range);
   }
lite/core/mir/mlu_postprocess_pass.cc

@@ -292,7 +292,7 @@ void MLUPostprocessPass::GetSubgraphOpArgType(Node* inst_node,
   // get subgraph op's type info
   size_t kernel_size = inst_node->AsStmt().kernels().size();
-  CHECK_GT(kernel_size, 0);
+  CHECK_GT(kernel_size, 0u);
   VLOG(4) << "subgraph kernel size: " << kernel_size;
   for (size_t i = 0; i < kernel_size; ++i) {
@@ -450,7 +450,7 @@ bool MLUPostprocessPass::IsFirstConvInSubgraph(Node* arg_node, Node* inst) {
   auto* block_desc =
       static_cast<operators::SubgraphOp*>(inst->AsStmt().op().get())
           ->GetSubBlock();
-  for (int op_idx = 0; op_idx < block_desc->OpsSize(); op_idx++) {
+  for (size_t op_idx = 0; op_idx < block_desc->OpsSize(); op_idx++) {
     auto op_desc = block_desc->GetOp<cpp::OpDesc>(op_idx);
     CHECK(op_desc);
     if (op_desc->Type() == "conv2d") {
lite/core/mir/subgraph/subgraph_detector.cc

@@ -47,8 +47,8 @@ std::string SubgraphVisualizer::operator()() {
       "turquoise4", "snow3", "sienna4", "salmon2",
   };
   std::unordered_map<Node*, int> subgraph_indices;
-  for (int i = 0; i < subgraphs_.size(); i++) {
-    for (int j = 0; j < subgraphs_[i].size(); j++) {
+  for (size_t i = 0; i < subgraphs_.size(); i++) {
+    for (size_t j = 0; j < subgraphs_[i].size(); j++) {
       subgraph_indices[subgraphs_[i][j]] = i;
     }
   }
@@ -538,7 +538,8 @@ void SubgraphFuser::ReplaceNodesWithSubgraphs(SSAGraph *graph,
   std::vector<std::vector<Node *>> subgraphs = SubgraphDetector(graph, teller)();
   SubgraphVisualizer(graph, subgraphs)();
-  for (int subgraph_idx = 0; subgraph_idx < subgraphs.size(); subgraph_idx++) {
+  for (size_t subgraph_idx = 0; subgraph_idx < subgraphs.size();
+       subgraph_idx++) {
     if (subgraphs[subgraph_idx].size() >= min_subgraph_size) {
       InsertNewNode(graph, subgraph_idx, subgraphs[subgraph_idx]);
     }
lite/core/mir/subgraph/subgraph_detector_test.cc

@@ -36,8 +36,8 @@ std::vector<std::string> AddFCDesc(
     const std::shared_ptr<Scope>& scope,
     const std::vector<std::string>& input_var_names,
     const std::vector<int64_t>& wshape) {
-  CHECK_EQ(input_var_names.size(), 1);
-  CHECK_EQ(wshape.size(), 2);
+  CHECK_EQ(input_var_names.size(), 1u);
+  CHECK_EQ(wshape.size(), 2u);
   static int id = 0;
   std::string prefix = "fc_" + paddle::lite::to_string(id);
   auto* op_desc = block_desc->AddOp<cpp::OpDesc>();
@@ -169,8 +169,8 @@ TEST(Subgraph, detect_simple_model) {
   };
   std::vector<std::vector<mir::Node*>> subgraphs =
       mir::SubgraphDetector(graph.get(), teller)();
-  ASSERT_EQ(subgraphs.size(), 1);
-  ASSERT_EQ(graph->nodes().size(), 9);
+  ASSERT_EQ(subgraphs.size(), 1u);
+  ASSERT_EQ(graph->nodes().size(), 9u);
   mir::SubgraphVisualizer(graph.get(), subgraphs)();
 }
@@ -221,7 +221,7 @@ TEST(Subgraph, detect_custom_model) {
   std::vector<std::vector<mir::Node*>> subgraphs =
       mir::SubgraphDetector(graph.get(), teller)();
   mir::SubgraphVisualizer(graph.get(), subgraphs)();
-  ASSERT_EQ(subgraphs.size(), 1);
+  ASSERT_EQ(subgraphs.size(), 1u);
 }
 }  // namespace lite
lite/core/mir/subgraph/subgraph_pass_test.cc

@@ -39,7 +39,7 @@ std::vector<std::vector<int64_t>> ShapeParsing(std::string text) {
   std::vector<std::vector<int64_t>> shapes;
   std::vector<std::string> shape_strings = Split(text, ":");
   shapes.resize(shape_strings.size());
-  for (int i = 0; i < shape_strings.size(); i++) {
+  for (size_t i = 0; i < shape_strings.size(); i++) {
     std::vector<std::string> shape_nums = Split(shape_strings[i], ",");
     for (auto shape_num : shape_nums) {
       shapes[i].push_back(atoi(shape_num.c_str()));
@@ -66,7 +66,7 @@ void FillInputTensors(
     for (int j = 0; j < input_tensor_size; j++) {                   \
       input_tensor_data[j] = static_cast<type>(value);              \
     }
-  for (int i = 0; i < input_tensor_shape.size(); i++) {
+  for (size_t i = 0; i < input_tensor_shape.size(); i++) {
     auto input_tensor = predictor->GetInput(i);
     input_tensor->Resize(input_tensor_shape[i]);
     auto input_tensor_size = ShapeProduction(input_tensor->shape());
@@ -95,7 +95,7 @@ void CheckOutputTensors(
         << " abs_diff: " << abs_diff << " rel_diff: " << rel_diff;  \
     EXPECT_LT(rel_diff, 0.1);                                       \
   }
-  for (int i = 0; i < output_tensor_type.size(); i++) {
+  for (size_t i = 0; i < output_tensor_type.size(); i++) {
     auto tar_output_tensor = tar_predictor->GetOutput(i);
     auto ref_output_tensor = ref_predictor->GetOutput(i);
     auto tar_output_tensor_size = ShapeProduction(tar_output_tensor->shape());
lite/core/op_lite.cc

@@ -41,7 +41,7 @@ bool OpLite::InferShapeWithCache() {
        iter++) {
     // combined dims value into new_hash value.
     auto& element_dims = (*iter)->dims();
-    for (int i = 0; i < element_dims.size(); i++) {
+    for (size_t i = 0; i < element_dims.size(); i++) {
       new_hash = lite::hash_combine(new_hash, static_cast<int>(element_dims[i]));
     }
@@ -49,7 +49,7 @@ bool OpLite::InferShapeWithCache() {
     auto& emement_lods = (*iter)->lod();
     for (auto lod_iter = emement_lods.begin(); lod_iter != emement_lods.end();
          lod_iter++) {
-      for (int i = 0; i < lod_iter->size(); i++) {
+      for (size_t i = 0; i < lod_iter->size(); i++) {
        new_hash = lite::hash_combine(new_hash, static_cast<int>(lod_iter->at(i)));
      }
@@ -60,7 +60,7 @@ bool OpLite::InferShapeWithCache() {
     // if current hash value is consistent with io_shape_lod_hash_,
     // previous outputs shape and lod are reused.
     auto* current_outputs = param_.output_tensor_ptrs();
-    for (int i = 0; i < current_outputs->size(); i++) {
+    for (size_t i = 0; i < current_outputs->size(); i++) {
       current_outputs->at(i)->Resize(last_output_shapes[i]);
       current_outputs->at(i)->set_lod(last_output_lods[i]);
     }
@@ -69,7 +69,7 @@ bool OpLite::InferShapeWithCache() {
     io_shape_lod_hash_ = new_hash;
     this->InferShapeImpl();
     auto* current_outputs = param_.output_tensor_ptrs();
-    for (int i = 0; i < current_outputs->size(); i++) {
+    for (size_t i = 0; i < current_outputs->size(); i++) {
       last_output_shapes[i] = current_outputs->at(i)->dims();
       last_output_lods[i] = current_outputs->at(i)->lod();
     }
lite/core/program.cc

@@ -72,7 +72,7 @@ void RuntimeProgram::UpdateVarsOfProgram(cpp::ProgramDesc* desc) {
   std::unordered_map<std::string, cpp::VarDesc> origin_var_maps;
   auto& main_block = *desc->GetBlock<cpp::BlockDesc>(0);
   auto var_size = main_block.VarsSize();
-  for (int i = 0; i < var_size; i++) {
+  for (size_t i = 0; i < var_size; i++) {
     auto v = main_block.GetVar<cpp::VarDesc>(i);
     auto name = v->Name();
     origin_var_maps.emplace(name, *v);
lite/core/tensor.cc

@@ -100,7 +100,7 @@ void *TensorLite::mutable_data(TargetType target, size_t memory_size) {
 void TensorLite::ResetBuffer(std::shared_ptr<Buffer> buffer,
                              size_t memory_size) {
-  CHECK_EQ(offset_, 0)
+  CHECK_EQ(offset_, 0u)
       << "Only the offset is supported to zero when the Buffer is reset.";
   if (buffer_) {
     CHECK_LE(memory_size_, buffer->space())
lite/kernels/mlu/bridges/act_op_test.cc

@@ -44,40 +44,40 @@ void act_ref(const std::shared_ptr<operators::ActivationOp> op) {
   // "sigmoid","relu","tanh","relu_clipped","leaky_relu","softsign","hard_sigmoid"
   if (op_type == "sigmoid") {
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = 1.f / (1.f + std::exp(-x_data[i]));
     }
   } else if (op_type == "relu") {
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = std::max(0.f, x_data[i]);
     }
   } else if (op_type == "tanh") {
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = (std::exp(x_data[i]) - std::exp(-x_data[i])) /
                     (std::exp(x_data[i]) + std::exp(-x_data[i]));
     }
   } else if (op_type == "relu_clipped") {
     auto relu_clipped_coef = op_info->GetAttr<float>("Relu_clipped_coef");
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = std::min(std::max(0.f, x_data[i]), relu_clipped_coef);
     }
   } else if (op_type == "relu6") {
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = std::min(std::max(0.f, x_data[i]), 6.f);
     }
   } else if (op_type == "leaky_relu") {
     auto alpha = op_info->GetAttr<float>("alpha");
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = std::max(x_data[i], x_data[i] * alpha);
     }
   } else if (op_type == "softsign") {
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = x_data[i] / (1 + std::abs(x_data[i]));
     }
   } else if (op_type == "hard_sigmoid") {
     auto slope = op_info->GetAttr<float>("slope");
     auto offset = op_info->GetAttr<float>("offset");
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = std::min(1.f, slope * x_data[i] + offset);
       out_data[i] = std::max(0.f, out_data[i]);
     }
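These test loops also flip from unsigned back to signed: the indices were already size_t, but the bound out->numel() is evidently a signed count, which makes size_t the mismatched side here. A self-contained stand-in (the FakeTensor type is invented for the example; the assumption that numel() returns a signed integer is implied by the diff, not confirmed elsewhere):

#include <cstdint>
#include <vector>

struct FakeTensor {
  std::vector<float> data;
  // Signed element count, mirroring how numel() behaves in these hunks.
  int64_t numel() const { return static_cast<int64_t>(data.size()); }
};

void relu_ref(const FakeTensor& x, FakeTensor* out) {
  for (int i = 0; i < out->numel(); i++) {  // was: size_t i, which warned
    out->data[i] = x.data[i] > 0.f ? x.data[i] : 0.f;
  }
}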
lite/kernels/mlu/bridges/concat_op_test.cc

@@ -37,7 +37,7 @@ void concat_ref(const std::shared_ptr<operators::ConcatOpLite> op) {
       scope->FindVar(op_info->Output("Out").front())->GetMutable<Tensor>();
   int axis = op_info->GetAttr<int>("axis");
   std::vector<lite::Tensor*> inputs_concat(inputs.size());
-  for (int j = 0; j < inputs.size(); ++j) {
+  for (size_t j = 0; j < inputs.size(); ++j) {
     inputs_concat[j] = inputs[j];
   }
   size_t num = inputs.size();
@@ -48,7 +48,7 @@ void concat_ref(const std::shared_ptr<operators::ConcatOpLite> op) {
   }
   int out_rows = rows, out_cols = 0;
   std::vector<int64_t> inputs_cols(inputs.size());
-  for (int i = 0; i < num; ++i) {
+  for (size_t i = 0; i < num; ++i) {
     int t_cols = inputs[i]->numel() / rows;
     out_cols += t_cols;
     inputs_cols[i] = t_cols;
@@ -56,7 +56,7 @@ void concat_ref(const std::shared_ptr<operators::ConcatOpLite> op) {
   for (int k = 0; k < out_rows; ++k) {
     float* dst_ptr = out->mutable_data<float>() + k * out_cols;
     int col_idx = 0;
-    for (int j = 0; j < num; ++j) {
+    for (size_t j = 0; j < num; ++j) {
       int col_len = inputs_cols[j];
       const float* src_prt = inputs[j]->data<float>() + k * col_len;
       std::memcpy(dst_ptr + col_idx, src_prt, sizeof(float) * col_len);
lite/kernels/mlu/bridges/conv_op.cc

@@ -43,20 +43,20 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   const auto output_shape = output->dims().Vectorize();
   const auto bs = input_dims[0];
   const auto oc = filter_dims[0];
-  CHECK_EQ(input_dims.size(), 4);
-  CHECK_EQ(filter_dims.size(), 4);
+  CHECK_EQ(input_dims.size(), 4u);
+  CHECK_EQ(filter_dims.size(), 4u);
   const auto strides = op_info->GetAttr<std::vector<int>>("strides");
   auto dilations = op_info->GetAttr<std::vector<int>>("dilations");
   auto paddings = op_info->GetAttr<std::vector<int>>("paddings");
-  CHECK_EQ(strides.size(), 2L);
-  CHECK_EQ(dilations.size(), 2L);
-  if (paddings.size() == 2L) {
+  CHECK_EQ(strides.size(), 2u);
+  CHECK_EQ(dilations.size(), 2u);
+  if (paddings.size() == 2u) {
     for (size_t i = 0; i < strides.size(); ++i) {
       int copy_pad = *(paddings.begin() + 2 * i);
       paddings.insert(paddings.begin() + 2 * i + 1, copy_pad);
     }
   }
-  CHECK_EQ(paddings.size(), 4L)
+  CHECK_EQ(paddings.size(), 4u)
       << "Paddings size should be the same or twice as the input size.";
   const std::string padding_algorithm =
lite/kernels/mlu/bridges/conv_op_test.cc

@@ -173,10 +173,10 @@ void test_conv(int bs,
   Tensor input_int;
   input_int.Resize(input_shape);
   FillTensor<int8_t, int8_t>(&input_int, -127, 127);
-  for (int i = 0; i < input->data_size(); i++) {
+  for (size_t i = 0; i < input->data_size(); i++) {
     input->mutable_data<float>()[i] = input_int.data<int8_t>()[i] * input_scale;
   }
-  for (int i = 0; i < filter->data_size(); i++) {
+  for (size_t i = 0; i < filter->data_size(); i++) {
     filter->mutable_data<float>()[i] =
         filter_int->data<int8_t>()[i] * filter_scale;
   }
lite/kernels/mlu/bridges/fc_op_test.cc

@@ -97,11 +97,11 @@ void test_fc(const std::vector<int64_t>& input_shape,
   Tensor input_int;
   input_int.Resize(input_shape);
   FillTensor<int8_t, int8_t>(&input_int, -127, 127);
-  for (int i = 0; i < input->data_size(); i++) {
+  for (size_t i = 0; i < input->data_size(); i++) {
     input->mutable_data<float>()[i] = input_int.data<int8_t>()[i] * input_scale;
   }
-  for (int i = 0; i < w->data_size(); i++) {
+  for (size_t i = 0; i < w->data_size(); i++) {
     w->mutable_data<float>()[i] = w_int->data<int8_t>()[i] * w_scale;
   }
lite/kernels/mlu/bridges/interpolate_op.cc

@@ -36,7 +36,7 @@ int InterpolateConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto x = scope->FindVar(x_var_name)->GetMutable<Tensor>();
   auto out = scope->FindVar(out_var_name)->GetMutable<Tensor>();
   auto x_dims = x->dims();
-  CHECK_EQ(x_dims.size(), 4);
+  CHECK_EQ(x_dims.size(), 4u);
   auto scale = op_info->GetAttr<float>("scale");
   auto out_w = op_info->GetAttr<int>("out_w");
   auto out_h = op_info->GetAttr<int>("out_h");
lite/kernels/mlu/bridges/interpolate_op_test.cc

@@ -85,7 +85,7 @@ void BilinearInterpRef(const lite::Tensor* x,
   int channel_size = x_dims[1];
   auto x_h = x_dims[2];
   auto x_w = x_dims[3];
-  CHECK_EQ(x_dims.size(), 4);
+  CHECK_EQ(x_dims.size(), 4u);
   auto out_dims = out->dims();
   int out_h = out_dims[2];
lite/kernels/mlu/bridges/utility.cc

@@ -59,10 +59,10 @@ void dequant(float* dst,
             size_t size,
             size_t size_in,
             std::vector<float> scales) {
-  for (int out = 0; out < size_o; ++out) {
-    for (int s = 0; s < size; ++s) {
+  for (size_t out = 0; out < size_o; ++out) {
+    for (size_t s = 0; s < size; ++s) {
       auto scale = scales[s];
-      for (int in = 0; in < size_in; ++in) {
+      for (size_t in = 0; in < size_in; ++in) {
         int idx = in + s * size_in + out * size_in * size;
         dst[idx] = static_cast<float>(src[idx]) * scale;
       }
lite/kernels/npu/bridges/engine.cc

@@ -30,7 +30,7 @@ int Engine::BuildOriginProgram() {
   // TODO(hong19860320) The block_desc need to be divided into subgraphs during
   // the exection time. But only see them as a subgraph now.
   origin_program_.clear();
-  for (int op_idx = 0; op_idx < block_desc_->OpsSize(); op_idx++) {
+  for (size_t op_idx = 0; op_idx < block_desc_->OpsSize(); op_idx++) {
     auto op_desc = block_desc_->GetOp<cpp::OpDesc>(op_idx);
     CHECK(op_desc);
     std::string op_type = op_desc->Type();
@@ -46,7 +46,7 @@ int Engine::BuildOriginProgram() {
       VLOG(3) << "Found the attr '" << kKernelTypeAttr << "': " << kernel_type
               << " for " << op_type;
       auto kernels = op->CreateKernels({place});
-      CHECK_GT(kernels.size(), 0) << "No kernels found for " << op_type;
+      CHECK_GT(kernels.size(), 0u) << "No kernels found for " << op_type;
       auto it = std::find_if(
           kernels.begin(), kernels.end(), [&](std::unique_ptr<KernelBase>& it) {
             return it->alias() == alias;
@@ -96,7 +96,7 @@ int Engine::Build() {
 }
 bool Engine::InputShapeChanged() {
-  for (int i = 0; i < origin_itensors_.size(); i++) {
+  for (size_t i = 0; i < origin_itensors_.size(); i++) {
     if (origin_itensors_[i]->dims() != origin_idims_[i]) {
       return true;
     }
lite/kernels/x86/elementwise_op_function.h

@@ -64,14 +64,14 @@ inline void get_mid_dims(const lite::DDim &x_dims,
     for (int i = 0; i < axis; ++i) {
       (*pre) *= x_dims[i];
     }
-    for (int i = 0; i < y_dims.size(); ++i) {
+    for (size_t i = 0; i < y_dims.size(); ++i) {
       if (x_dims[i + axis] != y_dims[i]) {
         // only support single y_dims[i] = 1 now.
         PADDLE_ENFORCE_EQ(
             *mid_flag, 0, "Broadcast support y_dims with single 1.");
         PADDLE_ENFORCE_EQ(y_dims[i], 1, "Broadcast dimension mismatch.");
         // m*n*k m*1*k
-        for (int j = 0; j < i; ++j) {
+        for (size_t j = 0; j < i; ++j) {
           (*pre) *= y_dims[j];
         }
         *n = std::max(x_dims[i + axis], y_dims[i]);
@@ -82,11 +82,11 @@ inline void get_mid_dims(const lite::DDim &x_dims,
       (*n) *= y_dims[i];
     }
     if (*mid_flag) {
-      for (int i = mid + 1; i < x_dims.size(); ++i) {
+      for (size_t i = mid + 1; i < x_dims.size(); ++i) {
         (*post) *= x_dims[i];
       }
     } else {
-      for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) {
+      for (size_t i = axis + y_dims.size(); i < x_dims.size(); ++i) {
        (*post) *= x_dims[i];
      }
    }
@@ -95,13 +95,13 @@ inline void get_mid_dims(const lite::DDim &x_dims,
       (*pre) *= x_dims[i];
     }
-    for (int i = 0; i < y_dims.size(); ++i) {
+    for (size_t i = 0; i < y_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(
           x_dims[i + axis], y_dims[i], "Broadcast dimension mismatch.");
       (*n) *= y_dims[i];
     }
-    for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) {
+    for (size_t i = axis + y_dims.size(); i < x_dims.size(); ++i) {
       (*post) *= x_dims[i];
     }
   }
@@ -116,7 +116,7 @@ inline lite::DDim trim_trailing_singular_dims(const lite::DDim &dims) {
   std::vector<int64_t> trim_dims;
   trim_dims.resize(actual_dims_size);
-  for (int i = 0; i < actual_dims_size; ++i) {
+  for (size_t i = 0; i < actual_dims_size; ++i) {
     trim_dims[i] = dims[i];
   }
   if (trim_dims.size() == 0) {
lite/kernels/x86/fill_constant_batch_size_like_compute_test.cc

@@ -71,7 +71,7 @@ TEST(fill_constant_batch_size_like_x86, run_test) {
   std::vector<float> ref_results{
       3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5};
-  for (int i = 0; i < ref_results.size(); i++) {
+  for (size_t i = 0; i < ref_results.size(); i++) {
     EXPECT_NEAR(out_data[i], ref_results[i], 1e-3);
   }
 }
lite/kernels/x86/gather_compute.h

@@ -56,7 +56,7 @@ void CPUGather(const lite::Tensor* src,
   // slice size
   int slice_size = 1;
-  for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
+  for (size_t i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
   const size_t slice_bytes = slice_size * sizeof(T);
   for (int64_t i = 0; i < index_size; ++i) {
lite/kernels/x86/layer_norm_compute_test.cc

@@ -108,7 +108,7 @@ TEST(layer_norm_x86, run_test) {
   for (int i = 0; i < begin_norm_axis; ++i) {
     pre *= x_shape[i];
   }
-  for (int i = begin_norm_axis; i < x_shape.size(); ++i) {
+  for (size_t i = begin_norm_axis; i < x_shape.size(); ++i) {
     post *= x_shape[i];
   }
   std::vector<int64_t> scale_shape({post});
lite/kernels/x86/sequence_expand_as_compute.h

@@ -66,8 +66,8 @@ class SequenceExpandAsCompute
     auto* out = param.out;
     auto& y_lod = y->lod();
-    CHECK_EQ(y_lod.size(), 1);
-    CHECK_GT(y_lod[0].size(), 1);
+    CHECK_EQ(y_lod.size(), 1u);
+    CHECK_GT(y_lod[0].size(), 1u);
     out->template mutable_data<T, T>();
lite/kernels/x86/sequence_reverse_compute_test.cc

@@ -30,7 +30,7 @@ static void sequence_reverse_ref(const lite::Tensor* x, lite::Tensor* y) {
   auto seq_offset = x->lod()[x->lod().size() - 1];
   int width = x->numel() / x->dims()[0];
   auto* y_data = y->mutable_data<float>();
-  for (int i = 0; i < seq_offset.size() - 1; ++i) {
+  for (size_t i = 0; i < seq_offset.size() - 1; ++i) {
     auto start_pos = seq_offset[i];
     auto end_pos = seq_offset[i + 1];
     for (auto pos = start_pos; pos < end_pos; ++pos) {
lite/kernels/x86/shape_compute.h

@@ -31,7 +31,7 @@ class ShapeCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     // auto& context = context_->As<X86Context>();
     auto out_data = param.Out->template mutable_data<int32_t>();
     auto in_dims = param.X->dims();
-    for (int i = 0; i < in_dims.size(); ++i) {
+    for (size_t i = 0; i < in_dims.size(); ++i) {
       out_data[i] = in_dims[i];
     }
   }
lite/kernels/x86/slice_compute.h

@@ -118,7 +118,7 @@ void slice_compute(const lite::Tensor* in,
       out_dims[decrease_axis[i]] = 0;
     }
-    for (int i = 0; i < out_dims.size(); ++i) {
+    for (size_t i = 0; i < out_dims.size(); ++i) {
       if (out_dims[i] != 0) {
         new_out_shape.push_back(out_dims[i]);
       }
lite/kernels/x86/slice_compute_test.cc

@@ -34,10 +34,10 @@ static void slice_ref(const float* input,
   std::vector<int> real_starts(in_dims.size(), 0);
   std::vector<int> real_ends(in_dims.size(), 0);
   std::vector<int> real_step(in_dims.size(), 0);
-  for (int i = 0; i < in_dims.size(); i++) {
+  for (size_t i = 0; i < in_dims.size(); i++) {
     real_ends[i] = in_dims[i];
   }
-  for (int i = 0; i < axes.size(); i++) {
+  for (size_t i = 0; i < axes.size(); i++) {
     int dim_value = in_dims[axes[i]];
     if (dim_value > 0) {
       int start = starts[i] < 0 ? (starts[i] + dim_value) : starts[i];
@@ -52,11 +52,11 @@ static void slice_ref(const float* input,
   }
   const int LEN = in_dims.size();
   int dst_step[LEN];
-  for (int i = 0; i < in_dims.size(); ++i) {
+  for (size_t i = 0; i < in_dims.size(); ++i) {
     dst_step[i] = 1;
   }
   int src_step[LEN];
-  for (int i = 0; i < in_dims.size(); ++i) {
+  for (size_t i = 0; i < in_dims.size(); ++i) {
     src_step[i] = 1;
   }
   int out_num = out_dims[in_dims.size() - 1];
@@ -69,7 +69,7 @@ static void slice_ref(const float* input,
   for (int dst_id = 0; dst_id < out_num; dst_id++) {
     int src_id = 0;
     int index_id = dst_id;
-    for (int j = 0; j < out_dims.size(); j++) {
+    for (size_t j = 0; j < out_dims.size(); j++) {
       int cur_id = index_id / dst_step[j];
       index_id = index_id % dst_step[j];
       src_id += (cur_id + real_starts[j]) * src_step[j];
@@ -409,7 +409,7 @@ void test_tensor_case3(lite::Tensor x, lite::Tensor out) {
   lite::Tensor starts_tensor, ends_tensor;
   starts_tensor.Resize(DDim({3}));
   ends_tensor.Resize(DDim({3}));
-  for (int i = 0; i < starts.size(); ++i) {
+  for (size_t i = 0; i < starts.size(); ++i) {
     starts_tensor.mutable_data<int>()[i] = starts[i];
     ends_tensor.mutable_data<int>()[i] = ends[i];
   }
lite/kernels/x86/stack_compute.h

@@ -47,7 +47,7 @@ class StackCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     int pre = 1, post = 1;
     auto dim = x[0]->dims();
     for (int i = 0; i < axis; ++i) pre *= dim[i];
-    for (int i = axis; i < dim.size(); ++i) post *= dim[i];
+    for (size_t i = axis; i < dim.size(); ++i) post *= dim[i];
     auto x_data_arr = x_datas.data();
lite/kernels/x86/var_conv_2d_compute.h

@@ -44,7 +44,7 @@ class VarConv2DCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     // 2-D lod info.
     // const auto& offset_x = in_col->lod()[0];
     // const auto& offset_y = in_row->lod()[0];
-    CHECK_EQ(param.X->lod().size(), 3) << "input lod size should be 3!";
+    CHECK_EQ(param.X->lod().size(), 3u) << "input lod size should be 3!";
     const auto& offset_y = param.X->lod()[1];
     const auto& offset_x = param.X->lod()[2];
lite/model_parser/model_parser_test.cc

@@ -107,7 +107,7 @@ TEST(ModelParser, LoadParamNaive) {
   ASSERT_EQ(bg_lod, tensor.lod());
   ASSERT_EQ(tensor.data_size(), size);
   auto* data = tensor.data<float>();
-  for (int i = 0; i < size; ++i) {
+  for (size_t i = 0; i < size; ++i) {
     EXPECT_NEAR(bg_data[i], data[i], 1e-6);
   }
 }
lite/operators/elementwise_ops.cc

@@ -35,7 +35,8 @@ bool ElementwiseOp::InferShapeImpl() const {
     auto out_lod = param_.Out->mutable_lod();
     *out_lod = param_.X->lod();
   } else {
-    int max_dim = (x_dim.size() > y_dim.size() ? x_dim.size() : y_dim.size());
+    size_t max_dim =
+        (x_dim.size() > y_dim.size() ? x_dim.size() : y_dim.size());
     int axis = param_.axis;
     axis = (axis == -1 ? std::abs(static_cast<int>(x_dim.size() - y_dim.size()))
                        : axis);
@@ -48,12 +49,12 @@ bool ElementwiseOp::InferShapeImpl() const {
         y_dims_array[i] = 1;
       }
       if (axis + y_dim.size() < max_dim) {
-        for (int i = axis + y_dim.size(); i < max_dim; ++i) {
+        for (size_t i = axis + y_dim.size(); i < max_dim; ++i) {
           y_dims_array[i] = 1;
         }
       }
       x_dims_array = x_dim.Vectorize();
-      for (int i = 0; i < y_dim.size(); ++i) {
+      for (size_t i = 0; i < y_dim.size(); ++i) {
        y_dims_array[i + axis] = y_dim[i];
      }
    } else {
@@ -61,16 +62,16 @@ bool ElementwiseOp::InferShapeImpl() const {
         x_dims_array[i] = 1;
       }
       if (axis + x_dim.size() < max_dim) {
-        for (int i = axis + x_dim.size(); i < max_dim; ++i) {
+        for (size_t i = axis + x_dim.size(); i < max_dim; ++i) {
           x_dims_array[i] = 1;
         }
       }
       y_dims_array = y_dim.Vectorize();
-      for (int i = 0; i < x_dim.size(); ++i) {
+      for (size_t i = 0; i < x_dim.size(); ++i) {
         x_dims_array[i + axis] = x_dim[i];
       }
     }
-    for (int i = 0; i < max_dim; i++) {
+    for (size_t i = 0; i < max_dim; i++) {
       if (x_dims_array[i] == -1 || y_dims_array[i] == -1) {
         out_dims_array[i] = -1;
       } else {
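Here the fix is propagated through a local: max_dim is derived from two size() calls, so declaring it size_t lets every loop below use a matching size_t index, while the pre-existing static_cast<int> keeps the axis arithmetic signed. A simplified sketch of the same idea (not the real operator):

#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<int64_t> broadcast_shape(const std::vector<int64_t>& x,
                                     const std::vector<int64_t>& y) {
  // Derived from size() calls, so declared size_t (was int before the fix).
  size_t max_dim = x.size() > y.size() ? x.size() : y.size();
  std::vector<int64_t> out(max_dim, 1);
  for (size_t i = 0; i < max_dim; i++) {  // index type matches the bound
    if (i < x.size() && x[i] > out[i]) out[i] = x[i];
    if (i < y.size() && y[i] > out[i]) out[i] = y[i];
  }
  return out;
}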
lite/operators/expand_op.cc

@@ -27,7 +27,7 @@ bool ExpandOpLite::CheckShape() const {
   CHECK_EQ(expand_size, x_dims_size)
       << "The number of expand_times size must be qual to the rank of "
          "Input(X).";
-  CHECK_LE(param_.X->dims().size(), 6)
+  CHECK_LE(param_.X->dims().size(), 6u)
       << "The rank of Input(X) must not be greater than 6.";
   return true;
 }
lite/operators/fill_constant_batch_size_like_op.cc

@@ -22,7 +22,7 @@ namespace operators {
 bool FillConstantBatchSizeLikeOp::CheckShape() const {
   CHECK(param_.out);
   CHECK(param_.input);
-  CHECK_GT(param_.shape.size(), 0);
+  CHECK_GT(param_.shape.size(), 0u);
   CHECK_GE(param_.input_dim_idx, 0);
   CHECK_GE(param_.output_dim_idx, 0);
   return true;
lite/operators/fill_constant_op.cc

@@ -34,7 +34,7 @@ bool FillConstantOp::InferShapeImpl() const {
       out_shape.push_back(shape_tensor_data[i]);
     }
   } else if (!shape_tensor_list.empty()) {
-    for (int i = 0; i < shape_tensor_list.size(); i++) {
+    for (size_t i = 0; i < shape_tensor_list.size(); i++) {
       out_shape.push_back(shape_tensor_list[i]->data<int>()[0]);
     }
   } else if (!param_.shape.empty()) {
lite/operators/flatten_op.cc

@@ -32,7 +32,7 @@ bool FlattenOp::InferShapeImpl() const {
   *out_lod = param_.x->lod();
   int64_t outer = 1, inner = 1;
-  for (int i = 0; i < x_dims.size(); ++i) {
+  for (size_t i = 0; i < x_dims.size(); ++i) {
     if (i < axis_) {
       outer *= x_dims[i];
     } else {
lite/operators/interpolate_op.cc

@@ -48,14 +48,14 @@ bool InterpolateOp::InferShapeImpl() const {
   auto OutSize = param_.OutSize;
   auto Scale = param_.Scale;
   if (!SizeTensor.empty()) {
-    CHECK_EQ(SizeTensor.size(), 2)
+    CHECK_EQ(SizeTensor.size(), 2u)
         << "Input(SizeTensor)'size of Op(interpolate) must be 2. "
           "Attr(out_shape)'s length must be 2 for 4-D input tensor.";
     out_h = SizeTensor[0]->data<int>()[0];
     out_w = SizeTensor[1]->data<int>()[0];
   } else if (OutSize) {
     auto OutSize_dims = OutSize->dims();
-    CHECK_EQ(OutSize_dims.size(), 1) << "Input(OutSize)'s dims size must be 1";
+    CHECK_EQ(OutSize_dims.size(), 1u) << "Input(OutSize)'s dims size must be 1";
     CHECK_EQ(OutSize_dims[0], 2) << "OutSize's dim[0] must be 2";
     auto OutSize_data = OutSize->data<int>();
     out_h = OutSize_data[0];
lite/operators/pool_op.h

@@ -105,7 +105,7 @@ inline void UpdatePadding(std::vector<int> *paddings,
                           const std::vector<int> &ksize) {
   // when padding_algorithm is "VALID" or "SAME"
   if (padding_algorithm == "SAME") {
-    for (int i = 0; i < strides.size(); ++i) {
+    for (size_t i = 0; i < strides.size(); ++i) {
       int out_size = (data_dims[i + 2] + strides[i] - 1) / strides[i];
       int pad_sum =
           std::max((out_size - 1) * strides[i] + ksize[i] - data_dims[i + 2],
lite/operators/reduce_mean_op.cc

@@ -29,7 +29,7 @@ bool ReduceMeanOp::CheckShape() const {
   auto x_dims = param_.X->dims();
   int x_rank = x_dims.size();
   if (dims.size() != 0) {
-    for (int i = 0; i < dims.size(); i++) {
+    for (size_t i = 0; i < dims.size(); i++) {
       if (dims[i] < 0) {
         dims[i] = x_rank + dims[i];
       }
@@ -46,7 +46,7 @@ bool ReduceMeanOp::InferShapeImpl() const {
   bool keep_dim = param_.keep_dim;
   auto x_rank = x_dims.size();
   if (dims.size() != 0) {
-    for (int i = 0; i < dims.size(); i++) {
+    for (size_t i = 0; i < dims.size(); i++) {
       if (dims[i] < 0) {
         dims[i] = x_rank + dims[i];
       }
@@ -65,7 +65,7 @@ bool ReduceMeanOp::InferShapeImpl() const {
       out_dims.push_back(1);
     }
   } else {
-    for (int i = 0; i < x_dims.size(); i++) {
+    for (size_t i = 0; i < x_dims.size(); i++) {
       out_dims.push_back(x_dims[i]);
     }
     if (keep_dim) {
lite/operators/reshape_op.cc

@@ -70,7 +70,7 @@ bool ReshapeOp::AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) {
         param_.shape_tensor_vct.push_back(var->GetMutable<lite::Tensor>());
       }
     }
-    CHECK_GT(param_.shape_tensor_vct.size(), 0)
+    CHECK_GT(param_.shape_tensor_vct.size(), 0u)
         << "ShapeError: When `shape` in ReshapeOp is a list or tuple "
            "which contains Tensor, the shape's size can't be zero. "
            "But received shape's size is "
@@ -145,7 +145,7 @@ std::vector<DDim::value_type> ValidateShape(const std::vector<int> &shape,
           << "Only one input dimension of Attr(shape) can be unknown.";
       unk_dim_idx = i;
     } else if (shape[i] == copy_dim_val) {
-      CHECK_LT(static_cast<int>(i), input_dims.size())
+      CHECK_LT(i, input_dims.size())
          << "The index of dimension to copy from input shape must be less "
             "than the size of input shape.";
    } else {
lite/operators/search_fc_op.cc

@@ -41,11 +41,11 @@ bool SearchFcOpLite::CheckShape() const {
   CHECK_OR_FALSE(param_.Out);
   auto x_dims = param_.X->dims();
-  CHECK_EQ(x_dims.size(), 2) << "The rank of X(Input) should be 2.";
+  CHECK_EQ(x_dims.size(), 2u) << "The rank of X(Input) should be 2.";
   auto w_dims = param_.W->dims();
-  CHECK_EQ(w_dims.size(), 2) << "W should be 2-D tensor.";
+  CHECK_EQ(w_dims.size(), 2u) << "W should be 2-D tensor.";
   auto b_dims = param_.b->dims();
-  CHECK_EQ(b_dims.size(), 1) << "b should be 1-D tensor.";
+  CHECK_EQ(b_dims.size(), 1u) << "b should be 1-D tensor.";
   CHECK_EQ(w_dims[1], x_dims[1]) << "wrong shape: w_dims[1] != x_dims[1]";
   return true;
 }
lite/operators/slice_op.cc

@@ -22,7 +22,7 @@ namespace operators {
 bool SliceOp::CheckShape() const {
   CHECK_OR_FALSE(param_.X);
   CHECK_OR_FALSE(param_.Out);
-  CHECK_LT(param_.X->dims().size(), 7)
+  CHECK_LT(param_.X->dims().size(), 7u)
       << "The rank of input X should be less than 7";
   return true;
 }
@@ -67,7 +67,7 @@ bool SliceOp::InferShapeImpl() const {
       }
       out_dims[decrease_axis[i]] = 0;
     }
-    for (int i = 0; i < out_dims.size(); ++i) {
+    for (size_t i = 0; i < out_dims.size(); ++i) {
       if (out_dims[i] != 0) {
         new_out_shape.push_back(out_dims[i]);
       }
@@ -108,7 +108,7 @@ bool SliceOp::AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) {
   // The priority: StartsTensor > StartsTensorList > attr(starts).
   // The priority: EndsTensor > EndsTensorList > attr(ends).
-  int starts_size, ends_size;
+  size_t starts_size, ends_size;
   if (opdesc.HasAttr("starts")) {
     param_.starts = opdesc.GetAttr<std::vector<int>>("starts");
   }
@@ -129,7 +129,7 @@ bool SliceOp::AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) {
       param_.StartsTensorList.push_back(
           scope->FindVar(var)->GetMutable<lite::Tensor>());
     }
-    CHECK_GT(param_.StartsTensorList.size(), 0)
+    CHECK_GT(param_.StartsTensorList.size(), 0u)
         << "StartsTensorList size can't be zero";
     starts_size = param_.StartsTensorList.size();
   }
@@ -141,7 +141,7 @@ bool SliceOp::AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) {
       param_.EndsTensorList.push_back(
          scope->FindVar(var)->GetMutable<lite::Tensor>());
     }
-    CHECK_GT(param_.EndsTensorList.size(), 0)
+    CHECK_GT(param_.EndsTensorList.size(), 0u)
        << "EndsTensorList size can't be zero";
    ends_size = param_.EndsTensorList.size();
  }
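The starts_size/ends_size change is the same idea applied to plain locals: both are only ever assigned from .size() calls and compared against other sizes, so declaring them size_t up front removes the mismatch at every later use. A minimal sketch (a hypothetical helper, not SliceOp itself):

#include <cstddef>
#include <vector>

size_t min_section_count(const std::vector<int>& starts,
                         const std::vector<int>& ends) {
  size_t starts_size = starts.size();  // was: int starts_size
  size_t ends_size = ends.size();      // was: int ends_size
  return starts_size < ends_size ? starts_size : ends_size;
}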
lite/operators/split_op.cc

@@ -67,7 +67,7 @@ bool SplitOp::InferShapeImpl() const {
     axis = param_.axis_tensor->data<int>()[0];
   }
-  for (int j = 0; j < outs_dims.size(); ++j) {
+  for (size_t j = 0; j < outs_dims.size(); ++j) {
     outs[j]->Resize(outs_dims[j]);
   }
lite/operators/squeeze_op.cc

@@ -28,7 +28,7 @@ static DDim GetOutputShape(const std::vector<int> &squeeze_dims,
   // Determines number of dimensions of output tensor after squeeze.
   // Mark and count the dimensions need to be squeezed
   if (num_squeeze_dims == 0) {
-    for (int idx = 0; idx < in_dims.size(); ++idx) {
+    for (size_t idx = 0; idx < in_dims.size(); ++idx) {
       if (in_dims[idx] == 1) {
         should_squeeze[idx] = true;
         ++cnt_squeezed_dims;
@@ -57,7 +57,7 @@ static DDim GetOutputShape(const std::vector<int> &squeeze_dims,
   // Make output dimensions
   std::vector<int64_t> output_shape(in_dims.size() - cnt_squeezed_dims, 0);
-  for (int in_idx = 0, out_idx = 0; in_idx < in_dims.size(); ++in_idx) {
+  for (size_t in_idx = 0, out_idx = 0; in_idx < in_dims.size(); ++in_idx) {
     if (!should_squeeze[in_idx]) {
       output_shape[out_idx++] = in_dims[in_idx];
     }
lite/operators/unsqueeze_op.cc

@@ -75,7 +75,7 @@ bool UnsqueezeOp::InferShapeImpl() const {
     final_axes = std::vector<int>(axes_tensor_data,
                                   axes_tensor_data + axes_tensor->numel());
   } else if (!axes_tensor_vct.empty()) {
-    for (int i = 0; i < axes_tensor_vct.size(); i++) {
+    for (size_t i = 0; i < axes_tensor_vct.size(); i++) {
      final_axes.push_back(axes_tensor_vct[i]->data<int>()[0]);
    }
  } else {