PaddlePaddle / Paddle-Lite

Commit e60ab7ae (unverified)
Merge branch 'develop' into develop

Authored Sep 20, 2018 by Ruilong Liu; committed by GitHub on Sep 20, 2018.
Parents: e1b11237, 29ffc3be
Showing 14 changed files with 19 additions and 17 deletions (+19 -17).
src/fpga/api.cpp                                           +3 -3
src/fpga/image.cpp                                         +4 -2
src/operators/feed_op.h                                    +1 -1
src/operators/kernel/fpga/concat_kernel.cpp                +1 -1
src/operators/kernel/fpga/conv_add_bn_kernel.cpp           +1 -1
src/operators/kernel/fpga/conv_add_bn_relu_kernel.cpp      +1 -1
src/operators/kernel/fpga/conv_add_relu_kernel.cpp         +1 -1
src/operators/kernel/fpga/conv_bn_kernel.cpp               +1 -1
src/operators/kernel/fpga/conv_bn_relu_kernel.cpp          +1 -1
src/operators/kernel/fpga/elementwise_add_relu_kernel.cpp  +1 -1
src/operators/kernel/fpga/fc_relu_kernel.cpp               +1 -1
src/operators/kernel/fpga/fusion_fc_kernel.cpp             +1 -1
src/operators/kernel/fpga/pool_kernel.cpp                  +1 -1
src/operators/kernel/fpga/softmax_kernel.cpp               +1 -1
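Most of the hunks below make the same change: the fpga argument structs (MemoryCacheArgs, BypassArgs, ConcatArgs, WrapperConvArgs, EWAddArgs, PoolingArgs) that were previously declared without an initializer are now brace-initialized at the point of declaration, so any member the surrounding code never assigns holds zero instead of an indeterminate value. A minimal sketch of that pattern, using a hypothetical DemoArgs struct rather than the real fpga types:

#include <cstdint>
#include <cstdio>

// Hypothetical argument struct standing in for the fpga::*Args types.
struct DemoArgs {
  void *address;
  uint32_t size;
  uint32_t flags;  // a member the caller below never sets explicitly
};

int main() {
  char buf[64];

  // With a bare "DemoArgs args;" the unset 'flags' member would hold an
  // indeterminate value; brace-initializing zeroes every member first.
  DemoArgs args = {nullptr};
  args.address = buf;
  args.size = sizeof(buf);

  std::printf("size=%u flags=%u\n", (unsigned)args.size, (unsigned)args.flags);
  return 0;  // prints: size=64 flags=0
}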
src/fpga/api.cpp

@@ -86,14 +86,14 @@ void fpga_copy(void *dest, const void *src, size_t num) {
 }
 
 int fpga_flush(void *address, size_t size) {
-  struct MemoryCacheArgs args;
+  struct MemoryCacheArgs args = {nullptr};
   args.address = address;
   args.size = size;
   return do_ioctl(IOCTL_MEMCACHE_FLUSH, &args);
 }
 
 int fpga_invalidate(void *address, size_t size) {
-  struct MemoryCacheArgs args;
+  struct MemoryCacheArgs args = {nullptr};
   args.address = address;
   args.size = size;
   return do_ioctl(IOCTL_MEMCACHE_INVAL, &args);

@@ -332,7 +332,7 @@ void format_concat_output(framework::Tensor *out, int height, int width,
   sum_cw = align_to_x(width * sum_channel, IMAGE_ALIGNMENT);
   auto data_ptr = fpga_malloc(height * sum_cw * sizeof(half));
-  auto ddim = framework::make_ddim({-1, sum_channel, height, width});
+  auto ddim = framework::make_ddim({1, sum_channel, height, width});
   out->Resize(ddim);
   out->reset_data_ptr(data_ptr);
 }
src/fpga/image.cpp

@@ -74,15 +74,17 @@ void concat_images(int16_t **images_in, float **scales_in, void *image_out,
   int align_each_in_area_cw = 0;
   int align_each_out_area_cw_differ = 0;
   int tmp_channel = 0;
-  *scale_out = 0;
+  scale_out[0] = 0.0;
+  scale_out[1] = 0.0;
   for (i = 0; i < image_num; i++) {
     each_out_line_channel += channel_num[i];
-    *scale_out = std::max(*scale_out, scales_in[i][0]);
+    scale_out[0] = std::max(*scale_out, scales_in[i][0]);
     fpga_invalidate(images_in[i],
                     height * align_to_x(channel_num[i] * width, IMAGE_ALIGNMENT) *
                         sizeof(int16_t));
   }
+  scale_out[1] = 1 / scale_out[0];
   align_each_out_area_cw =
       align_to_x(each_out_line_channel * width, IMAGE_ALIGNMENT);
   align_each_out_area_cw_differ =
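The concat_images change above stops treating scale_out as a single float and instead fills a two-element array: element 0 receives the maximum of the per-image scales and element 1 its reciprocal. A small self-contained sketch of that convention (the function name and signature here are illustrative, not the library's API):

#include <algorithm>
#include <cstdio>

// Illustrative only: combine per-image scales the way the patched
// concat_images loop does — take the maximum, then store its inverse.
void combine_scales(const float **scales_in, int image_num, float *scale_out) {
  scale_out[0] = 0.0f;
  scale_out[1] = 0.0f;
  for (int i = 0; i < image_num; i++) {
    scale_out[0] = std::max(scale_out[0], scales_in[i][0]);
  }
  scale_out[1] = 1 / scale_out[0];
}

int main() {
  float s0[] = {0.5f, 2.0f};
  float s1[] = {0.25f, 4.0f};
  const float *scales[] = {s0, s1};
  float out[2];
  combine_scales(scales, 2, out);
  std::printf("max scale = %f, reciprocal = %f\n", out[0], out[1]);  // 0.5, 2.0
  return 0;
}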
src/operators/feed_op.h

@@ -55,7 +55,7 @@ class FeedOp : public framework::OperatorBase<DeviceType> {
     Tensor *output = param_.Out();
     auto output_ptr = output->data<half>();
 
-    fpga::BypassArgs args;
+    fpga::BypassArgs args = {fpga::DATA_TYPE_FP32};
     args.input_data_type = fpga::DATA_TYPE_FP32;
     args.output_data_type = fpga::DATA_TYPE_FP16;
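Here (and again in softmax_kernel.cpp below) BypassArgs is brace-initialized with only its first member, the input data type; under C++ aggregate initialization the remaining members are then value-initialized to zero. A stand-alone illustration with stand-in types (DataType and Bypass below are hypothetical, not the real fpga::BypassArgs):

#include <cstdio>

enum DataType { DATA_TYPE_FP32 = 1, DATA_TYPE_FP16 = 2 };

// Stand-in for an argument struct whose first member is a data type.
struct Bypass {
  DataType input_data_type;
  DataType output_data_type;
  int scale_address;  // left out of the braces below
};

int main() {
  Bypass args = {DATA_TYPE_FP32};  // remaining members are value-initialized
  std::printf("in=%d out=%d scale=%d\n", (int)args.input_data_type,
              (int)args.output_data_type, args.scale_address);
  return 0;  // prints: in=1 out=0 scale=0
}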
src/operators/kernel/fpga/concat_kernel.cpp

@@ -43,7 +43,7 @@ bool ConcatKernel<FPGA, float>::Init(ConcatParam<FPGA> *param) {
   fpga::format_concat_output(out, (int)height, (int)width, (int)image_num,
                              channel_num);
 
-  fpga::ConcatArgs concatArgs;
+  fpga::ConcatArgs concatArgs = {0};
   concatArgs.image_num = (uint32_t)image_num;
   concatArgs.images_in = images_in;
   concatArgs.scales_in = scales_in;
src/operators/kernel/fpga/conv_add_bn_kernel.cpp

@@ -66,7 +66,7 @@ bool ConvAddBNKernel<FPGA, float>::Init(FusionConvAddBNParam<FPGA> *param) {
   fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);
   fpga::format_fp16_ofm(out);
 
-  fpga::WrapperConvArgs conv_arg;
+  fpga::WrapperConvArgs conv_arg = {0};
   fpga::fill_conv_arg(&conv_arg, input, out, filter, relu_enabled,
                       param->Groups(), param->Strides()[0], param->Strides()[1],
                       param->Paddings()[0], param->Paddings()[1], bs_ptr);
src/operators/kernel/fpga/conv_add_bn_relu_kernel.cpp

@@ -64,7 +64,7 @@ bool ConvAddBNReluKernel<FPGA, float>::Init(
   fpga::format_fp16_ofm(out);
 
-  fpga::WrapperConvArgs conv_arg;
+  fpga::WrapperConvArgs conv_arg = {0};
   fpga::fill_conv_arg(&conv_arg, input, out, filter, relu_enabled,
                       param->Groups(), param->Strides()[0], param->Strides()[1],
                       param->Paddings()[0], param->Paddings()[1], bs_ptr);
src/operators/kernel/fpga/conv_add_relu_kernel.cpp

@@ -46,7 +46,7 @@ bool ConvAddReluKernel<FPGA, float>::Init(FusionConvAddReluParam<FPGA> *param) {
   fpga::format_fp16_ofm(out);
 
-  fpga::WrapperConvArgs conv_arg;
+  fpga::WrapperConvArgs conv_arg = {0};
   fpga::fill_conv_arg(&conv_arg, input, out, filter, relu_enabled,
                       param->Groups(), param->Strides()[0], param->Strides()[1],
                       param->Paddings()[0], param->Paddings()[1], bs_ptr);
src/operators/kernel/fpga/conv_bn_kernel.cpp

@@ -58,7 +58,7 @@ bool ConvBNKernel<FPGA, float>::Init(FusionConvBNParam<FPGA> *param) {
   fpga::format_fp16_ofm(out);
 
-  fpga::WrapperConvArgs conv_arg;
+  fpga::WrapperConvArgs conv_arg = {0};
   fpga::fill_conv_arg(&conv_arg, input, out, filter, relu_enabled,
                       param->Groups(), param->Strides()[0], param->Strides()[1],
                       param->Paddings()[0], param->Paddings()[1], bs_ptr);
src/operators/kernel/fpga/conv_bn_relu_kernel.cpp

@@ -58,7 +58,7 @@ bool ConvBNReluKernel<FPGA, float>::Init(FusionConvBNReluParam<FPGA> *param) {
   fpga::format_fp16_ofm(out);
 
-  fpga::WrapperConvArgs conv_arg;
+  fpga::WrapperConvArgs conv_arg = {0};
   fpga::fill_conv_arg(&conv_arg, input, out, filter, relu_enabled,
                       param->Groups(), param->Strides()[0], param->Strides()[1],
                       param->Paddings()[0], param->Paddings()[1], bs_ptr);
src/operators/kernel/fpga/elementwise_add_relu_kernel.cpp

@@ -30,7 +30,7 @@ bool ElementwiseAddReluKernel<FPGA, float>::Init(
   fpga::format_fp16_ofm(out);
   auto out_ptr = out->mutable_data<float>();
 
-  fpga::EWAddArgs ewaddArgs;
+  fpga::EWAddArgs ewaddArgs = {0};
   ewaddArgs.relu_enabled = relu_enabled;
   ewaddArgs.const0 = 1;
   ewaddArgs.const1 = 1;
src/operators/kernel/fpga/fc_relu_kernel.cpp

@@ -51,7 +51,7 @@ bool FusionFcReluKernel<FPGA, float>::Init(FusionFcReluParam<FPGA> *param) {
   fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);
   fpga::format_fp16_ofm(out);
 
-  fpga::WrapperConvArgs conv_arg;
+  fpga::WrapperConvArgs conv_arg = {0};
   fpga::fill_conv_arg(&conv_arg, input_x, out, filter, relu_enabled, 1, 1, 1, 0,
                       0, bs_ptr);
   param->SetFpgaArgs(conv_arg);
src/operators/kernel/fpga/fusion_fc_kernel.cpp

@@ -52,7 +52,7 @@ bool FusionFcKernel<FPGA, float>::Init(FusionFcParam<FPGA> *param) {
   fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);
   fpga::format_fp16_ofm(out);
 
-  fpga::WrapperConvArgs conv_arg;
+  fpga::WrapperConvArgs conv_arg = {0};
   fpga::fill_conv_arg(&conv_arg, input_x, out, filter, relu_enabled, 1, 1, 1, 0,
                       0, bs_ptr);
   param->SetFpgaArgs(conv_arg);
src/operators/kernel/fpga/pool_kernel.cpp

@@ -30,7 +30,7 @@ bool PoolKernel<FPGA, float>::Init(PoolParam<FPGA> *param) {
   vector<int> strides = param->Strides();
   vector<int> paddings = param->Paddings();
 
-  fpga::PoolingArgs poolArgs;
+  fpga::PoolingArgs poolArgs = {0};
   poolArgs.image.address = input_ptr;
   poolArgs.image.channels = (uint32_t)input->dims()[1];
   poolArgs.image.height = (uint32_t)input->dims()[2];
src/operators/kernel/fpga/softmax_kernel.cpp

@@ -29,7 +29,7 @@ bool SoftmaxKernel<FPGA, float>::Init(SoftmaxParam<FPGA> *param) {
   auto float_input = new Tensor(*input);
   fpga::format_fp32_ofm(float_input);
 
-  fpga::BypassArgs args;
+  fpga::BypassArgs args = {fpga::DATA_TYPE_FP16};
   args.input_layout_type = fpga::LAYOUT_HWC;
   args.output_layout_type = fpga::LAYOUT_CHW;
   args.input_data_type = fpga::DATA_TYPE_FP16;