PaddlePaddle / Paddle-Lite
Commit 1abd9921
Authored Dec 25, 2019 by Liu Yiqun
Use static array instead of std::vector in DDimLite.
test=develop
Parent: 28481458
Showing 10 changed files with 79 additions and 38 deletions (+79, -38).
- lite/core/tensor.cc: +13, -17
- lite/core/tensor.h: +60, -9
- lite/kernels/host/multiclass_nms_compute.cc: +4, -4
- lite/kernels/x86/reshape_compute.h: +0, -1
- lite/operators/generate_proposals_op.cc: +0, -1
- lite/operators/gru_unit_op.cc: +0, -1
- lite/operators/lookup_table_op.cc: +0, -2
- lite/operators/match_matrix_tensor_op.cc: +0, -1
- lite/tests/kernels/fc_compute_test.cc: +0, -1
- lite/tests/kernels/slice_compute_test.cc: +2, -1
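The core of the change is swapping `DDimLite`'s `std::vector<int64_t>` storage for a fixed-capacity, stack-allocated wrapper (`DimVector`, capped at `kMaxDimLength = 10`), so shape objects no longer allocate on the heap. Below is a simplified, self-contained sketch of that idea; it is not the actual class, and the real `DimVector` added to lite/core/tensor.h appears in the diff further down.

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Simplified stand-in for the DimVector<ValueType, DimLength> introduced in
// this commit: a plain array plus a logical size, so "resizing" only moves a
// counter and never allocates.
template <typename T, int Capacity>
class FixedDimArray {
 public:
  FixedDimArray() { std::memset(arr_, 0, sizeof(arr_)); }

  std::size_t size() const { return size_; }
  void resize(std::size_t n) {
    assert(n <= static_cast<std::size_t>(Capacity));  // capacity is fixed
    size_ = n;
  }

  T* data() { return arr_; }
  const T* data() const { return arr_; }
  T& operator[](int i) { return arr_[i]; }
  T operator[](int i) const { return arr_[i]; }

 private:
  T arr_[Capacity];
  std::size_t size_{0};
};

int main() {
  FixedDimArray<std::int64_t, 10> dims;  // 10 mirrors kMaxDimLength
  dims.resize(3);
  dims[0] = 2; dims[1] = 3; dims[2] = 4;
  assert(dims.size() == 3 && dims[2] == 4);
  return 0;
}
```

The trade-off is a hard upper bound on rank: in the real class, `resize()` checks the requested size against the static capacity instead of growing.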
lite/core/tensor.cc

```diff
@@ -25,35 +25,31 @@ using value_type = int64_t;

 value_type DDimLite::production() const {
   value_type res = 1;
-  for (size_t i = 0; i < this->size(); i++) {
-    res *= (*this)[i];
+  for (size_t i = 0; i < data_.size(); i++) {
+    res *= data_[i];
   }
   return res;
 }

 value_type DDimLite::count(int start, int end) const {
-  if (start < 0) {
-    start = 0;
-  }
-  if (end > size()) {
-    end = size();
-  }
+  start = std::max(start, 0);
+  end = std::min(end, static_cast<int>(data_.size()));
   if (end < start) {
-    end = start;
+    return 0;
   }
-  value_type sum = 1;
+  value_type res = 1;
   for (auto i = start; i < end; ++i) {
-    sum *= data_[i];
+    res *= data_[i];
   }
-  return sum;
+  return res;
 }

 DDimLite DDimLite::Slice(int start, int end) const {
-  std::vector<value_type> vec;
-  for (int i = start; i < end; i++) {
-    vec.push_back((*this)[i]);
-  }
-  return DDimLite(vec);
+  start = std::max(start, 0);
+  end = std::min(end, static_cast<int>(data_.size()));
+  value_type arr[kMaxDimLength];
+  memcpy(arr, data_.data() + start, (end - start) * sizeof(value_type));
+  return DDimLite(arr, end - start);
 }

 std::string DDimLite::repr() const {
```
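The behavioral tweaks in `count()` and `Slice()` are easy to miss: both now clamp the range into `[0, data_.size()]`, an inverted range in `count()` returns 0 (the old code clamped `end` to `start` and returned 1), and `Slice()` copies the selected entries into a `kMaxDimLength` stack array instead of building a temporary `std::vector`. A minimal caller-side sketch, assuming the `paddle::lite` namespace and the header path used in this repository:

```cpp
// Hypothetical caller-side sketch of the new count()/Slice() semantics.
#include <cassert>
#include <cstdint>
#include <vector>

#include "lite/core/tensor.h"  // DDimLite as modified by this commit

void DimRangeExamples() {
  paddle::lite::DDimLite dims(std::vector<int64_t>{2, 3, 4, 5});

  // production() multiplies every entry of the fixed-size backing store.
  assert(dims.production() == 2 * 3 * 4 * 5);

  // count() clamps [start, end) and multiplies the entries in range.
  assert(dims.count(1, 3) == 3 * 4);
  // An inverted range now yields 0 rather than the old clamped result of 1.
  assert(dims.count(3, 1) == 0);

  // Slice() memcpy-copies the selected entries into a new DDimLite.
  paddle::lite::DDimLite tail = dims.Slice(2, 4);
  assert(tail.size() == 2 && tail[0] == 4 && tail[1] == 5);
}
```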
lite/core/tensor.h

```diff
@@ -38,28 +38,69 @@ class TensorLite;

 using DDim = lite::DDimLite;
 using Tensor = lite::TensorLite;

+template <typename ValueType, int DimLength>
+class DimVector {
+ public:
+  DimVector() {
+    memset(arr_, 0, DimLength * sizeof(ValueType));
+    size_ = 0;
+  }
+
+  size_t size() const { return size_; }
+
+  void resize(size_t new_size) {
+    CHECK_LE(new_size, DimLength);
+    size_ = new_size;
+  }
+
+  ValueType* data() { return arr_; }
+  const ValueType* data() const { return arr_; }
+
+  ValueType operator[](int offset) const { return arr_[offset]; }
+  ValueType& operator[](int offset) { return arr_[offset]; }
+
+ private:
+  ValueType arr_[DimLength];
+  size_t size_{0};
+};
+
+constexpr int kMaxDimLength = 10;
+
 class DDimLite {
  public:
   using value_type = int64_t;
+  using DDimVector = DimVector<value_type, kMaxDimLength>;

   DDimLite() = default;

   explicit DDimLite(const std::vector<value_type>& x) { ConstructFrom(x); }
   // DDimLite(std::initializer_list<value_type> init_list) :
   //   DDimLite(std::vector<value_type>(init_list)) {}
+  explicit DDimLite(const value_type* arr, size_t size) {
+    data_.resize(size);
+    memcpy(data_.data(), arr, data_.size() * sizeof(value_type));
+  }

-  void ConstructFrom(const std::vector<value_type>& x) { data_ = x; }
+  void ConstructFrom(const std::vector<value_type>& x) {
+    data_.resize(x.size());
+    memcpy(data_.data(), x.data(), x.size() * sizeof(value_type));
+  }

   value_type operator[](int offset) const { return data_[offset]; }
   value_type& operator[](int offset) { return data_[offset]; }

-  std::vector<int64_t> Vectorize() const { return data_; }
+  std::vector<value_type> Vectorize() const {
+    std::vector<value_type> vec;
+    if (data_.size() > 0U) {
+      vec.resize(data_.size());
+      memcpy(vec.data(), data_.data(), data_.size() * sizeof(value_type));
+    }
+    return vec;
+  }

   size_t size() const { return data_.size(); }
-  bool empty() const { return data_.empty(); }
+  bool empty() const { return data_.size() == 0U; }
+
+  value_type production() const;
+
+  const DDimVector& data() const { return data_; }

-  const std::vector<value_type>& data() const { return data_; }
-  value_type production() const;
   value_type count(int start, int end) const;
   DDimLite Slice(int start, int end) const;

@@ -76,6 +117,12 @@ class DDimLite {
     return os;
   }

+  DDimLite& operator=(const DDimLite& a) {
+    this->data_.resize(a.size());
+    memcpy(this->data_.data(), a.data_.data(), a.size() * sizeof(value_type));
+    return *this;
+  }
+
   friend bool operator==(const DDimLite& a, const DDimLite& b) {
     if (a.size() != b.size()) return false;
     for (size_t i = 0; i < a.size(); i++) {

@@ -85,11 +132,15 @@ class DDimLite {
   }

   friend bool operator!=(const DDimLite& a, const DDimLite& b) {
-    return !(a == b);
+    if (a.size() != b.size()) return true;
+    for (size_t i = 0; i < a.size(); i++) {
+      if (a[i] != b[i]) return true;
+    }
+    return false;
   }

  private:
-  std::vector<value_type> data_;
+  DDimVector data_;
 };

 using LoD = std::vector<std::vector<uint64_t>>;
```
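For call sites, the visible consequence is that `data()` now returns the fixed-capacity `DDimVector` rather than a `std::vector` reference, so code that needs a real vector goes through `Vectorize()`, and the kernel and test changes below spell out `std::vector<int64_t>` explicitly when resizing. A hedged sketch of those caller adjustments (the tensor pointer and dimension values are illustrative only):

```cpp
// Sketch only; it assumes the call forms that appear elsewhere in this diff:
// Resize(std::vector<int64_t>{...}) and dims().Vectorize().
#include <cstdint>
#include <vector>

#include "lite/core/tensor.h"

void AdjustedCallSites(paddle::lite::TensorLite* t) {
  // Dimension lists are now passed as an explicit std::vector (see the
  // multiclass_nms_compute.cc hunks below).
  t->Resize(std::vector<int64_t>{1, 3, 224, 224});

  // data() returns the fixed-capacity DDimVector, so code that still needs a
  // std::vector copies the entries out via Vectorize() (see the
  // slice_compute_test.cc hunk below).
  std::vector<int64_t> dims_vec = t->dims().Vectorize();
  (void)dims_vec;
}
```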
lite/kernels/host/multiclass_nms_compute.cc

```diff
@@ -359,8 +359,8 @@ void MulticlassNmsCompute::Run() {
   uint64_t num_kept = batch_starts.back();
   if (num_kept == 0) {
     if (return_index) {
-      outs->Resize({0, out_dim});
-      index->Resize({0, 1});
+      outs->Resize(std::vector<int64_t>{0, out_dim});
+      index->Resize(std::vector<int64_t>{0, 1});
     } else {
       outs->Resize({1, 1});
       float* od = outs->mutable_data<float>();

@@ -375,8 +375,8 @@ void MulticlassNmsCompute::Run() {
     if (score_size == 3) {
       scores_slice = scores->Slice<float>(i, i + 1);
       boxes_slice = boxes->Slice<float>(i, i + 1);
-      scores_slice.Resize({score_dims[1], score_dims[2]});
-      boxes_slice.Resize({score_dims[2], box_dim});
+      scores_slice.Resize(std::vector<int64_t>{score_dims[1], score_dims[2]});
+      boxes_slice.Resize(std::vector<int64_t>{score_dims[2], box_dim});
       if (return_index) {
         offset = i * score_dims[2];
       }
```
lite/kernels/x86/reshape_compute.h

```diff
@@ -29,7 +29,6 @@ namespace x86 {
 template <typename T>
 void Compute(const lite::Tensor* in, lite::Tensor* out) {
   auto out_dims = out->dims();
-  auto in_dims = in->dims();
   out->CopyDataFrom(*in);
   out->Resize(out_dims);
 }
```
lite/operators/generate_proposals_op.cc

```cpp
@@ -32,7 +32,6 @@ bool GenerateProposalsOpLite::CheckShape() const {
  auto scores_dims = param_.Scores->dims();
  auto bbox_dims = param_.BboxDeltas->dims();
  auto im_info_dims = param_.ImInfo->dims();
  auto anchors_dims = param_.Anchors->dims();
  auto vars_dims = param_.Variances->dims();
```
lite/operators/gru_unit_op.cc

```diff
@@ -54,7 +54,6 @@ bool GRUUnitOpLite::CheckShape() const {
 bool GRUUnitOpLite::InferShape() const {
   auto input_dims = param_.input->dims();
   auto hidden_prev_dims = param_.hidden_prev->dims();
-  auto weight_dims = param_.weight->dims();

   int batch_size = input_dims[0];
   int frame_size = hidden_prev_dims[1];
```
lite/operators/lookup_table_op.cc

```cpp
@@ -42,8 +42,6 @@ bool LookupTableOpLite::InferShape() const {
  int ids_rank = ids_dims.size();

  auto output_dims = ids_dims.Slice(0, ids_rank - 1);

  std::vector<int64_t> out_dims;
  for (int i = 0; i < ids_rank - 1; ++i) {
    out_dims.push_back(ids_dims[i]);
```
lite/operators/match_matrix_tensor_op.cc

```cpp
@@ -47,7 +47,6 @@ bool MatchMatrixTensorOpLite::InferShape() const {
  const Tensor* y = param_.y;
  DDim x_dims = param_.x->dims();
  DDim y_dims = param_.y->dims();
  DDim w_dims = param_.w->dims();
  int dim_t = param_.dim_t;
  const auto& x_lod = x->lod();
```
lite/tests/kernels/fc_compute_test.cc

```diff
@@ -47,7 +47,6 @@ void Relu(float* out, int num, int channel) {
 DDim ComputeOutDim(const DDim& dim_in, const DDim& wdim, int in_num_col_dim) {
   std::vector<int64_t> out_dim;
   out_dim.resize(in_num_col_dim + 1);
-  auto in_mat_dims = dim_in.Flatten2D(in_num_col_dim);
   for (int i = 0; i < in_num_col_dim; ++i) {
     out_dim[i] = dim_in[i];
   }
```
lite/tests/kernels/slice_compute_test.cc

```diff
@@ -153,7 +153,8 @@ class SliceComputeTester : public arena::TestCase {
     }
     out->Resize(out_dims);
     auto* out_data = out->mutable_data<float>();
-    slice_ref(input_data, in_dims.data(), axes_, starts_, ends_, out_data);
+    std::vector<int64_t> in_dims_vec = in_dims.Vectorize();
+    slice_ref(input_data, in_dims_vec, axes_, starts_, ends_, out_data);
   }

   void PrepareOpDesc(cpp::OpDesc* op_desc) {
```