Commit cda2e2d9 (unverified)
Authored by huzhiqiang on Sep 21, 2020; committed via GitHub on Sep 21, 2020.
[Windows] Fix compiling error on develop branch (#4383)
Parent: ca9ec692
Showing 20 changed files with 84 additions and 85 deletions (+84 −85).
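Every change in this commit follows one pattern: calls to std::min, std::max, and std::numeric_limits<...>::max() are wrapped in parentheses so that the function-style min/max macros defined by MSVC's <windows.h> cannot expand them. A minimal sketch of the clash and the workaround — the macro below is a stand-in for the one windows.h defines when NOMINMAX is not set, and clamp_to_zero is a hypothetical example, not code from this commit:

#include <algorithm>

#define min(a, b) (((a) < (b)) ? (a) : (b))  // stand-in for the windows.h macro

int clamp_to_zero(int v) {
  // return std::min(v, 0);   // macro expands: "std::(((v) < (0)) ..." -> compile error
  return (std::min)(v, 0);    // `min` is not followed by `(`, so the macro stays inert
}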
lite/backends/x86/math/context_project.h                    +4  −4
lite/backends/x86/math/pooling.cc                           +40 −40
lite/backends/x86/math/sequence_padding.h                   +1  −1
lite/backends/x86/parallel.h                                +3  −3
lite/core/mir/memory_optimize_pass.cc                       +1  −1
lite/core/mir/static_kernel_pick_pass.h                     +1  −1
lite/kernels/host/crf_decoding_compute.h                    +2  −2
lite/kernels/host/multiclass_nms_compute.cc                 +4  −4
lite/kernels/host/print_compute.cc                          +1  −1
lite/kernels/host/retinanet_detection_output_compute.cc     +8  −8
lite/kernels/x86/elementwise_op_function.h                  +1  −1
lite/kernels/x86/sequence_arithmetic_compute.h              +3  −3
lite/kernels/x86/sequence_conv_compute.h                    +2  −2
lite/kernels/x86/slice_compute.h                            +4  −4
lite/model_parser/model_parser.cc                           +2  −2
lite/operators/conv_op.cc                                   +1  −1
lite/operators/elementwise_ops.cc                           +1  −1
lite/operators/pool_op.h                                    +2  −2
lite/operators/slice_op.cc                                  +3  −3
lite/tools/build_windows.bat                                +0  −1
lite/backends/x86/math/context_project.h

@@ -161,7 +161,7 @@ class ContextProjectFunctor {
               sequence_width});
       if (up_pad > 0) {  // add up pad
-        int padding_rows = std::min(
+        int padding_rows = (std::min)(
             up_pad, static_cast<int>(lod_level_0[i + 1] - lod_level_0[i]));
         for (int k = 0; k < padding_rows; ++k) {

@@ -180,10 +180,10 @@ class ContextProjectFunctor {
         }
       }
       if (down_pad > 0) {  // add down pad
-        int down_pad_begin_row =
-            std::max(
-                0, (sequence_height - context_start - context_length) + 1) +
-            1;
-        int padding_begin = std::max(0, context_start - sequence_height);
+        int down_pad_begin_row =
+            (std::max)(
+                0, (sequence_height - context_start - context_length) + 1) +
+            1;
+        int padding_begin = (std::max)(0, context_start - sequence_height);
         int padding_size =
             sequence_height - context_start >= context_length
                 ? 1
lite/backends/x86/math/pooling.cc

@@ -67,8 +67,8 @@ class Pool2dFunctor<lite::TargetType::kX86, PoolProcess, T> {
             hend = AdaptEndIndex(ph, input_height, output_height);
           } else {
             hstart = ph * stride_height - padding_height;
-            hend = std::min(hstart + ksize_height, input_height);
-            hstart = std::max(hstart, 0);
+            hend = (std::min)(hstart + ksize_height, input_height);
+            hstart = (std::max)(hstart, 0);
           }
           for (int pw = 0; pw < output_width; ++pw) {
             if (adaptive) {

@@ -76,8 +76,8 @@ class Pool2dFunctor<lite::TargetType::kX86, PoolProcess, T> {
               wend = AdaptEndIndex(pw, input_width, output_width);
             } else {
               wstart = pw * stride_width - padding_width;
-              wend = std::min(wstart + ksize_width, input_width);
-              wstart = std::max(wstart, 0);
+              wend = (std::min)(wstart + ksize_width, input_width);
+              wstart = (std::max)(wstart, 0);
             }
             T ele = pool_process.initial();

@@ -150,8 +150,8 @@ class Pool2dGradFunctor<lite::TargetType::kX86, PoolProcess, T> {
             hend = AdaptEndIndex(ph, input_height, output_height);
           } else {
             hstart = ph * stride_height - padding_height;
-            hend = std::min(hstart + ksize_height, input_height);
-            hstart = std::max(hstart, 0);
+            hend = (std::min)(hstart + ksize_height, input_height);
+            hstart = (std::max)(hstart, 0);
           }
           for (int pw = 0; pw < output_width; ++pw) {
             if (adaptive) {

@@ -159,8 +159,8 @@ class Pool2dGradFunctor<lite::TargetType::kX86, PoolProcess, T> {
               wend = AdaptEndIndex(pw, input_width, output_width);
             } else {
               wstart = pw * stride_width - padding_width;
-              wend = std::min(wstart + ksize_width, input_width);
-              wstart = std::max(wstart, 0);
+              wend = (std::min)(wstart + ksize_width, input_width);
+              wstart = (std::max)(wstart, 0);
             }
             int pool_size = (exclusive || adaptive)
                                 ? (hend - hstart) * (wend - wstart)

@@ -228,12 +228,12 @@ class MaxPool2dGradFunctor<lite::TargetType::kX86, T> {
       for (int c = 0; c < output_channels; ++c) {
         for (int ph = 0; ph < output_height; ++ph) {
           int hstart = ph * stride_height - padding_height;
-          int hend = std::min(hstart + ksize_height, input_height);
-          hstart = std::max(hstart, 0);
+          int hend = (std::min)(hstart + ksize_height, input_height);
+          hstart = (std::max)(hstart, 0);
           for (int pw = 0; pw < output_width; ++pw) {
             int wstart = pw * stride_width - padding_width;
-            int wend = std::min(wstart + ksize_width, input_width);
-            wstart = std::max(wstart, 0);
+            int wend = (std::min)(wstart + ksize_width, input_width);
+            wstart = (std::max)(wstart, 0);
             bool stop = false;
             for (int h = hstart; h < hend && !stop; ++h) {

@@ -337,8 +337,8 @@ class Pool3dFunctor<lite::TargetType::kX86, PoolProcess, T> {
             dend = AdaptEndIndex(pd, input_depth, output_depth);
           } else {
             dstart = pd * stride_depth - padding_depth;
-            dend = std::min(dstart + ksize_depth, input_depth);
-            dstart = std::max(dstart, 0);
+            dend = (std::min)(dstart + ksize_depth, input_depth);
+            dstart = (std::max)(dstart, 0);
           }
           for (int ph = 0; ph < output_height; ++ph) {
             if (adaptive) {

@@ -346,8 +346,8 @@ class Pool3dFunctor<lite::TargetType::kX86, PoolProcess, T> {
               hend = AdaptEndIndex(ph, input_height, output_height);
             } else {
               hstart = ph * stride_height - padding_height;
-              hend = std::min(hstart + ksize_height, input_height);
-              hstart = std::max(hstart, 0);
+              hend = (std::min)(hstart + ksize_height, input_height);
+              hstart = (std::max)(hstart, 0);
             }
             for (int pw = 0; pw < output_width; ++pw) {
               if (adaptive) {

@@ -355,8 +355,8 @@ class Pool3dFunctor<lite::TargetType::kX86, PoolProcess, T> {
                 wend = AdaptEndIndex(pw, input_width, output_width);
               } else {
                 wstart = pw * stride_width - padding_width;
-                wend = std::min(wstart + ksize_width, input_width);
-                wstart = std::max(wstart, 0);
+                wend = (std::min)(wstart + ksize_width, input_width);
+                wstart = (std::max)(wstart, 0);
               }
               int output_idx = (pd * output_height + ph) * output_width + pw;
               T ele = pool_process.initial();

@@ -441,8 +441,8 @@ class Pool3dGradFunctor<lite::TargetType::kX86, PoolProcess, T> {
             dend = AdaptEndIndex(pd, input_depth, output_depth);
           } else {
             dstart = pd * stride_depth - padding_depth;
-            dend = std::min(dstart + ksize_depth, input_depth);
-            dstart = std::max(dstart, 0);
+            dend = (std::min)(dstart + ksize_depth, input_depth);
+            dstart = (std::max)(dstart, 0);
           }
           for (int ph = 0; ph < output_height; ++ph) {
             if (adaptive) {

@@ -450,8 +450,8 @@ class Pool3dGradFunctor<lite::TargetType::kX86, PoolProcess, T> {
               hend = AdaptEndIndex(ph, input_height, output_height);
             } else {
               hstart = ph * stride_height - padding_height;
-              hend = std::min(hstart + ksize_height, input_height);
-              hstart = std::max(hstart, 0);
+              hend = (std::min)(hstart + ksize_height, input_height);
+              hstart = (std::max)(hstart, 0);
             }
             for (int pw = 0; pw < output_width; ++pw) {
               if (adaptive) {

@@ -459,8 +459,8 @@ class Pool3dGradFunctor<lite::TargetType::kX86, PoolProcess, T> {
                 wend = AdaptEndIndex(pw, input_width, output_width);
               } else {
                 wstart = pw * stride_width - padding_width;
-                wend = std::min(wstart + ksize_width, input_width);
-                wstart = std::max(wstart, 0);
+                wend = (std::min)(wstart + ksize_width, input_width);
+                wstart = (std::max)(wstart, 0);
               }
               int pool_size =

@@ -540,16 +540,16 @@ class MaxPool3dGradFunctor<lite::TargetType::kX86, T> {
       for (int c = 0; c < output_channels; ++c) {
         for (int pd = 0; pd < output_depth; ++pd) {
           int dstart = pd * stride_depth - padding_depth;
-          int dend = std::min(dstart + ksize_depth, input_depth);
-          dstart = std::max(dstart, 0);
+          int dend = (std::min)(dstart + ksize_depth, input_depth);
+          dstart = (std::max)(dstart, 0);
           for (int ph = 0; ph < output_height; ++ph) {
             int hstart = ph * stride_height - padding_height;
-            int hend = std::min(hstart + ksize_height, input_height);
-            hstart = std::max(hstart, 0);
+            int hend = (std::min)(hstart + ksize_height, input_height);
+            hstart = (std::max)(hstart, 0);
             for (int pw = 0; pw < output_width; ++pw) {
               int wstart = pw * stride_width - padding_width;
-              int wend = std::min(wstart + ksize_width, input_width);
-              wstart = std::max(wstart, 0);
+              int wend = (std::min)(wstart + ksize_width, input_width);
+              wstart = (std::max)(wstart, 0);
               bool stop = false;
               for (int d = dstart; d < dend && !stop; ++d) {
                 for (int h = hstart; h < hend && !stop; ++h) {

@@ -651,8 +651,8 @@ class MaxPool2dWithIndexFunctor<lite::TargetType::kX86, T1, T2> {
             hend = AdaptEndIndex(ph, input_height, output_height);
           } else {
             hstart = ph * stride_height - padding_height;
-            hend = std::min(hstart + ksize_height, input_height);
-            hstart = std::max(hstart, 0);
+            hend = (std::min)(hstart + ksize_height, input_height);
+            hstart = (std::max)(hstart, 0);
           }
           for (int pw = 0; pw < output_width; ++pw) {
             if (adaptive) {

@@ -660,8 +660,8 @@ class MaxPool2dWithIndexFunctor<lite::TargetType::kX86, T1, T2> {
               wend = AdaptEndIndex(pw, input_width, output_width);
             } else {
               wstart = pw * stride_width - padding_width;
-              wend = std::min(wstart + ksize_width, input_width);
-              wstart = std::max(wstart, 0);
+              wend = (std::min)(wstart + ksize_width, input_width);
+              wstart = (std::max)(wstart, 0);
             }
             T1 ele = static_cast<T1>(-FLT_MAX);

@@ -794,8 +794,8 @@ class MaxPool3dWithIndexFunctor<lite::TargetType::kX86, T1, T2> {
             dend = AdaptEndIndex(pd, input_depth, output_depth);
           } else {
             dstart = pd * stride_depth - padding_depth;
-            dend = std::min(dstart + ksize_depth, input_depth);
-            dstart = std::max(dstart, 0);
+            dend = (std::min)(dstart + ksize_depth, input_depth);
+            dstart = (std::max)(dstart, 0);
           }
           for (int ph = 0; ph < output_height; ++ph) {
             if (adaptive) {

@@ -803,8 +803,8 @@ class MaxPool3dWithIndexFunctor<lite::TargetType::kX86, T1, T2> {
               hend = AdaptEndIndex(ph, input_height, output_height);
             } else {
               hstart = ph * stride_height - padding_height;
-              hend = std::min(hstart + ksize_height, input_height);
-              hstart = std::max(hstart, 0);
+              hend = (std::min)(hstart + ksize_height, input_height);
+              hstart = (std::max)(hstart, 0);
             }
             for (int pw = 0; pw < output_width; ++pw) {
               if (adaptive) {

@@ -812,8 +812,8 @@ class MaxPool3dWithIndexFunctor<lite::TargetType::kX86, T1, T2> {
                 wend = AdaptEndIndex(pw, input_width, output_width);
               } else {
                 wstart = pw * stride_width - padding_width;
-                wend = std::min(wstart + ksize_width, input_width);
-                wstart = std::max(wstart, 0);
+                wend = (std::min)(wstart + ksize_width, input_width);
+                wstart = (std::max)(wstart, 0);
               }
               int output_idx = (pd * output_height + ph) * output_width + pw;
lite/backends/x86/math/sequence_padding.h

@@ -35,7 +35,7 @@ inline static uint64_t MaximumSequenceLength(
   uint64_t seq_num = seq_offset.size() - 1;
   uint64_t max_seq_len = 0;
   for (size_t i = 0; i < seq_num; ++i) {
-    max_seq_len = std::max(max_seq_len, seq_offset[i + 1] - seq_offset[i]);
+    max_seq_len = (std::max)(max_seq_len, seq_offset[i + 1] - seq_offset[i]);
   }
   return max_seq_len;
 }
lite/backends/x86/parallel.h

@@ -26,7 +26,7 @@ namespace x86 {
 static void SetNumThreads(int num_threads) {
 #ifdef PADDLE_WITH_MKLML
-  int real_num_threads = std::max(num_threads, 1);
+  int real_num_threads = (std::max)(num_threads, 1);
   x86::MKL_Set_Num_Threads(real_num_threads);
   omp_set_num_threads(real_num_threads);
 #endif

@@ -52,14 +52,14 @@ static inline void RunParallelFor(const int64_t begin,
   }
 #ifdef PADDLE_WITH_MKLML
-  int64_t num_threads = std::min(GetMaxThreads(), end - begin);
+  int64_t num_threads = (std::min)(GetMaxThreads(), end - begin);
   if (num_threads > 1) {
 #pragma omp parallel num_threads(num_threads)
     {
       int64_t tid = omp_get_thread_num();
       int64_t chunk_size = (end - begin + num_threads - 1) / num_threads;
       int64_t begin_tid = begin + tid * chunk_size;
-      f(begin_tid, std::min(end, chunk_size + begin_tid));
+      f(begin_tid, (std::min)(end, chunk_size + begin_tid));
     }
     return;
   }
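For reference, a small self-contained sketch (hypothetical values, plain C++ without OpenMP) of the chunking arithmetic RunParallelFor uses above: work is split into chunks of ceil((end - begin) / num_threads) indices, and the last chunk is clipped to end with the parenthesized (std::min):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t begin = 0, end = 10, num_threads = 3;
  // ceil division: (10 - 0 + 3 - 1) / 3 == 4
  const int64_t chunk_size = (end - begin + num_threads - 1) / num_threads;
  for (int64_t tid = 0; tid < num_threads; ++tid) {
    const int64_t begin_tid = begin + tid * chunk_size;
    // clips the last chunk: prints [0, 4), [4, 8), [8, 10)
    const int64_t end_tid = (std::min)(end, chunk_size + begin_tid);
    std::printf("thread %lld: [%lld, %lld)\n", (long long)tid,
                (long long)begin_tid, (long long)end_tid);
  }
  return 0;
}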
lite/core/mir/memory_optimize_pass.cc

@@ -148,7 +148,7 @@ void MemoryOptimizePass::CollectLifeCycleByDevice(
       int cur_life =
           (*lifecycles)[TargetToStr(target_type)][var_name].second;
       (*lifecycles)[TargetToStr(target_type)][var_name].second =
-          std::max(max_lifecycle_, cur_life);
+          (std::max)(max_lifecycle_, cur_life);
     }
   }
   ++max_lifecycle_;
lite/core/mir/static_kernel_pick_pass.h

@@ -61,7 +61,7 @@ class StaticKernelPickPass : public mir::StmtPass {
     float final_score{-1.};
     Place winner_place{places[0]};
     const int kMax =
-        std::numeric_limits<core::KernelPickFactor::value_type>::max();
+        (std::numeric_limits<core::KernelPickFactor::value_type>::max)();
     size_t place_size = places.size();

     // NOTE: We compare kernel's place with place in valid_places to select the
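The numeric_limits changes in this file, and in lite/kernels/host/crf_decoding_compute.h and lite/model_parser/model_parser.cc below, apply the same trick to a static member function. A short sketch (int stands in for the actual value_type):

#include <limits>

// With windows.h's two-argument `max(a, b)` macro in scope, the call below
// would be parsed as a zero-argument use of the macro and fail to compile:
//   const int kMax = std::numeric_limits<int>::max();
// Parenthesizing the qualified name hides the `max(` token pair from the
// preprocessor, so the real member function is called:
const int kMax = (std::numeric_limits<int>::max)();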
lite/kernels/host/crf_decoding_compute.h

@@ -52,7 +52,7 @@ void Decode(const Tensor& emission_weights,
   for (int k = 1; k < seq_len; ++k) {
     for (int i = 0; i < tag_num; ++i) {
-      T max_score = -std::numeric_limits<T>::max();
+      T max_score = -(std::numeric_limits<T>::max)();
       int max_j = 0;
       for (size_t j = 0; j < tag_num; ++j) {
         T score = alpha_value[(k - 1) * tag_num + j] +

@@ -67,7 +67,7 @@ void Decode(const Tensor& emission_weights,
     }
   }
-  T max_score = -std::numeric_limits<T>::max();
+  T max_score = -(std::numeric_limits<T>::max)();
   int max_i = 0;
   for (size_t i = 0; i < tag_num; ++i) {
     T score = alpha_value[(seq_len - 1) * tag_num + i] + w[tag_num + i];
lite/kernels/host/multiclass_nms_compute.cc

@@ -72,10 +72,10 @@ static T JaccardOverlap(const T* box1, const T* box2, const bool normalized) {
       box2[3] < box1[1]) {
     return static_cast<T>(0.);
   } else {
-    const T inter_xmin = std::max(box1[0], box2[0]);
-    const T inter_ymin = std::max(box1[1], box2[1]);
-    const T inter_xmax = std::min(box1[2], box2[2]);
-    const T inter_ymax = std::min(box1[3], box2[3]);
+    const T inter_xmin = (std::max)(box1[0], box2[0]);
+    const T inter_ymin = (std::max)(box1[1], box2[1]);
+    const T inter_xmax = (std::min)(box1[2], box2[2]);
+    const T inter_ymax = (std::min)(box1[3], box2[3]);
     T norm = normalized ? static_cast<T>(0.) : static_cast<T>(1.);
     T inter_w = inter_xmax - inter_xmin + norm;
     T inter_h = inter_ymax - inter_ymin + norm;
lite/kernels/host/print_compute.cc

@@ -128,7 +128,7 @@ class TensorFormatter {
   void FormatData(const Tensor& print_tensor, std::stringstream& log_stream) {
     int64_t print_size = summarize_ == -1
                              ? print_tensor.numel()
-                             : std::min(summarize_, print_tensor.numel());
+                             : (std::min)(summarize_, print_tensor.numel());
     const T* data = print_tensor.data<T>();  // Always kHost, so unnessary to
                                              // copy the data from device
     log_stream << " - data: [";
lite/kernels/host/retinanet_detection_output_compute.cc

@@ -83,10 +83,10 @@ static inline T JaccardOverlap(const std::vector<T>& box1,
       box2[3] < box1[1]) {
     return static_cast<T>(0.);
   } else {
-    const T inter_xmin = std::max(box1[0], box2[0]);
-    const T inter_ymin = std::max(box1[1], box2[1]);
-    const T inter_xmax = std::min(box1[2], box2[2]);
-    const T inter_ymax = std::min(box1[3], box2[3]);
+    const T inter_xmin = (std::max)(box1[0], box2[0]);
+    const T inter_ymin = (std::max)(box1[1], box2[1]);
+    const T inter_xmax = (std::min)(box1[2], box2[2]);
+    const T inter_ymax = (std::min)(box1[3], box2[3]);
     T norm = normalized ? static_cast<T>(0.) : static_cast<T>(1.);
     T inter_w = inter_xmax - inter_xmin + norm;
     T inter_h = inter_ymax - inter_ymin + norm;

@@ -183,10 +183,10 @@ void DeltaScoreToPrediction(
     pred_box_xmax = pred_box_xmax / im_scale;
     pred_box_ymax = pred_box_ymax / im_scale;
-    pred_box_xmin = std::max(std::min(pred_box_xmin, im_width - 1), zero);
-    pred_box_ymin = std::max(std::min(pred_box_ymin, im_height - 1), zero);
-    pred_box_xmax = std::max(std::min(pred_box_xmax, im_width - 1), zero);
-    pred_box_ymax = std::max(std::min(pred_box_ymax, im_height - 1), zero);
+    pred_box_xmin = (std::max)((std::min)(pred_box_xmin, im_width - 1), zero);
+    pred_box_ymin = (std::max)((std::min)(pred_box_ymin, im_height - 1), zero);
+    pred_box_xmax = (std::max)((std::min)(pred_box_xmax, im_width - 1), zero);
+    pred_box_ymax = (std::max)((std::min)(pred_box_ymax, im_height - 1), zero);
     std::vector<T> one_pred;
     one_pred.push_back(pred_box_xmin);
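The nested form above composes the same way. A hedged sketch (clamp_box_coord is a hypothetical helper, not code from this commit) of clamping a coordinate to [zero, upper] with both calls macro-proofed:

#include <algorithm>

template <typename T>
T clamp_box_coord(T v, T upper, T zero) {
  // Neither `min` nor `max` is immediately followed by `(`, so neither can
  // be expanded as a windows.h function-style macro.
  return (std::max)((std::min)(v, upper), zero);
}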
lite/kernels/x86/elementwise_op_function.h

@@ -71,7 +71,7 @@ inline void get_mid_dims(const lite::DDim &x_dims,
       for (size_t j = 0; j < i; ++j) {
         (*pre) *= y_dims[j];
       }
-      *n = std::max(x_dims[i + axis], y_dims[i]);
+      *n = (std::max)(x_dims[i + axis], y_dims[i]);
       *mid_flag = 1;
       mid = i;
       break;
lite/kernels/x86/sequence_arithmetic_compute.h

@@ -55,7 +55,7 @@ class SequenceArithmeticCompute
         auto input_x = x_data + x_seq_offset[i] * inner_size;
         auto input_y = y_data + y_seq_offset[i] * inner_size;
         auto t_out = out_data + x_seq_offset[i] * inner_size;
-        int len = std::min(len_x, len_y);
+        int len = (std::min)(len_x, len_y);
         for (int j = 0; j < len; j++) {
           t_out[j] = input_x[j] + input_y[j];
         }

@@ -73,7 +73,7 @@ class SequenceArithmeticCompute
         auto input_x = x_data + x_seq_offset[i] * inner_size;
         auto input_y = y_data + y_seq_offset[i] * inner_size;
         auto t_out = out_data + x_seq_offset[i] * inner_size;
-        int len = std::min(len_x, len_y);
+        int len = (std::min)(len_x, len_y);
         for (int j = 0; j < len; j++) {
           t_out[j] = input_x[j] - input_y[j];
         }

@@ -91,7 +91,7 @@ class SequenceArithmeticCompute
         auto input_x = x_data + x_seq_offset[i] * inner_size;
         auto input_y = y_data + y_seq_offset[i] * inner_size;
         auto t_out = out_data + x_seq_offset[i] * inner_size;
-        int len = std::min(len_x, len_y);
+        int len = (std::min)(len_x, len_y);
         for (int j = 0; j < len; j++) {
           t_out[j] = input_x[j] * input_y[j];
         }
lite/kernels/x86/sequence_conv_compute.h

@@ -49,8 +49,8 @@ class SequenceConvCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     bool padding_trainable = false;
     const Tensor* padding_data = nullptr;
-    int up_pad = std::max(0, -context_start);
-    int down_pad = std::max(0, context_start + context_length - 1);
+    int up_pad = (std::max)(0, -context_start);
+    int down_pad = (std::max)(0, context_start + context_length - 1);
     auto sequence_width = static_cast<int64_t>(in->dims()[1]);
     std::vector<int64_t> col_shape{in->dims()[0],
lite/kernels/x86/slice_compute.h

@@ -102,9 +102,9 @@ void slice_compute(const lite::Tensor* in,
       start = starts[i] < 0 ? (starts[i] + dim_value) : starts[i];
       end = ends[i] < 0 ? (ends[i] + dim_value) : ends[i];
-      start = std::max(start, 0);
-      end = std::max(end, 0);
-      end = std::min(end, dim_value);
+      start = (std::max)(start, 0);
+      end = (std::max)(end, 0);
+      end = (std::min)(end, dim_value);
       CHECK_GT(end, start) << "end should greater than start";
       out_dims[axes[i]] = end - start;
     }

@@ -172,7 +172,7 @@ void slice_compute(const lite::Tensor* in,
       if (start < 0) {
         start = (start + in_dims[axes[i]]);
       }
-      start = std::max(start, 0);
+      start = (std::max)(start, 0);
       offsets[axes[i]] = start;
     }
     auto in_t =
lite/model_parser/model_parser.cc

@@ -391,7 +391,7 @@ void TensorToStream(std::ostream &os, const lite::Tensor &tensor) {
   }
   {  // the 3rd field, tensor data
     uint64_t size = tensor.memory_size();
-    CHECK_LT(size, std::numeric_limits<std::streamsize>::max())
+    CHECK_LT(size, (std::numeric_limits<std::streamsize>::max)())
         << "Index overflow when writing tensor";
 #ifdef LITE_WITH_CUDA

@@ -461,7 +461,7 @@ void SetParamInfoNaive(naive_buffer::ParamDesc *param_desc,
   }
   desc.SetDim(tensor.dims().Vectorize());
   uint64_t size = tensor.memory_size();
-  CHECK_LT(size, std::numeric_limits<std::streamsize>::max())
+  CHECK_LT(size, (std::numeric_limits<std::streamsize>::max)())
      << "Index overflow when writing tensor";
 #ifdef LITE_WITH_CUDA
lite/operators/conv_op.cc

@@ -62,7 +62,7 @@ void UpdatePaddingAndDilation(std::vector<int>* paddings,
   if (padding_algorithm == "SAME") {
     for (size_t i = 0; i < strides.size(); ++i) {
       int out_size = (data_dims[i + 2] + strides[i] - 1) / strides[i];
-      int pad_sum = std::max(
+      int pad_sum = (std::max)(
           (out_size - 1) * strides[i] + ksize[i + 2] - data_dims[i + 2],
           (int64_t)0);
       int pad_0 = pad_sum / 2;
lite/operators/elementwise_ops.cc

@@ -75,7 +75,7 @@ bool ElementwiseOp::InferShapeImpl() const {
     if (x_dims_array[i] == -1 || y_dims_array[i] == -1) {
       out_dims_array[i] = -1;
     } else {
-      out_dims_array[i] = std::max(x_dims_array[i], y_dims_array[i]);
+      out_dims_array[i] = (std::max)(x_dims_array[i], y_dims_array[i]);
     }
   }
   param_.Out->Resize(DDim(out_dims_array));
lite/operators/pool_op.h

@@ -128,8 +128,8 @@ inline void UpdatePadding(std::vector<int> *paddings,
   for (size_t i = 0; i < strides.size(); ++i) {
     int out_size = (data_dims[i + 2] + strides[i] - 1) / strides[i];
     int pad_sum =
-        std::max((out_size - 1) * strides[i] + ksize[i] - data_dims[i + 2],
-                 (int64_t)0);
+        (std::max)((out_size - 1) * strides[i] + ksize[i] - data_dims[i + 2],
+                   (int64_t)0);
     int pad_0 = pad_sum / 2;
     int pad_1 = pad_sum - pad_0;
     *(paddings->begin() + i * 2) = pad_0;
lite/operators/slice_op.cc

@@ -51,9 +51,9 @@ bool SliceOp::InferShapeImpl() const {
       if (dim_value > 0) {
         start = starts[i] < 0 ? (starts[i] + dim_value) : starts[i];
         end = ends[i] < 0 ? (ends[i] + dim_value) : ends[i];
-        start = std::max(start, 0);
-        end = std::max(end, 0);
-        end = std::min(end, dim_value);
+        start = (std::max)(start, 0);
+        end = (std::max)(end, 0);
+        end = (std::min)(end, dim_value);
         out_dims[axes[i]] = end - start;
       }
     }
lite/tools/build_windows.bat

@@ -100,7 +100,6 @@ cd "%build_directory%"
     -DPYTHON_EXECUTABLE="%python_path%"

 call "%vcvarsall_dir%" amd64
-cd "%build_directory%"

 if "%BUILD_FOR_CI%"=="ON" (
     msbuild /m /p:Configuration=Release lite\lite_compile_deps.vcxproj
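As an aside (not part of this commit): the other common fix for the windows.h clash is to define NOMINMAX before any <windows.h> include, which suppresses the min/max macros at the source. Parenthesizing every call site, as this commit does, is the less invasive choice when third-party or transitively included headers may pull in windows.h without that define. A sketch of the alternative:

#ifndef NOMINMAX
#define NOMINMAX  // must precede <windows.h>
#endif
#include <windows.h>
#include <algorithm>

int smaller(int a, int b) { return std::min(a, b); }  // safe: the macros were never defined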