Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
33437fe4
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
33437fe4
编写于
6月 14, 2019
作者:
H
hong19860320
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
enable mobilenetv1, fix the bugs of conv, pool, relu and split
test=develop
上级
5f833603
变更
10
隐藏空白更改
内联
并排
Showing
10 changed file
with
59 addition
and
42 deletion
+59
-42
paddle/fluid/lite/api/cxx_api_bin.cc
paddle/fluid/lite/api/cxx_api_bin.cc
+10
-7
paddle/fluid/lite/core/optimizer.h
paddle/fluid/lite/core/optimizer.h
+12
-12
paddle/fluid/lite/kernels/arm/conv_compute.cc
paddle/fluid/lite/kernels/arm/conv_compute.cc
+2
-2
paddle/fluid/lite/kernels/arm/elementwise_add_compute.cc
paddle/fluid/lite/kernels/arm/elementwise_add_compute.cc
+1
-1
paddle/fluid/lite/kernels/arm/pool_compute.cc
paddle/fluid/lite/kernels/arm/pool_compute.cc
+1
-1
paddle/fluid/lite/kernels/arm/relu_compute.h
paddle/fluid/lite/kernels/arm/relu_compute.h
+2
-0
paddle/fluid/lite/operators/conv_op.h
paddle/fluid/lite/operators/conv_op.h
+17
-12
paddle/fluid/lite/operators/pool_op.h
paddle/fluid/lite/operators/pool_op.h
+12
-4
paddle/fluid/lite/operators/relu_op.cc
paddle/fluid/lite/operators/relu_op.cc
+1
-2
paddle/fluid/lite/operators/split_op.cc
paddle/fluid/lite/operators/split_op.cc
+1
-1
未找到文件。
paddle/fluid/lite/api/cxx_api_bin.cc
浏览文件 @
33437fe4
...
...
@@ -14,9 +14,9 @@
#include "paddle/fluid/lite/api/cxx_api.h"
#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
//
#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
#include "paddle/fluid/lite/core/mir/passes.h"
#endif
//
#endif
#include "paddle/fluid/lite/core/op_registry.h"
...
...
@@ -24,6 +24,9 @@ namespace paddle {
namespace
lite
{
void
Run
(
const
char
*
model_dir
)
{
#ifdef LITE_WITH_ARM
DeviceInfo
::
Init
();
#endif
lite
::
ExecutorLite
predictor
;
std
::
vector
<
Place
>
valid_places
({
Place
{
TARGET
(
kHost
),
PRECISION
(
kFloat
)},
Place
{
TARGET
(
kARM
),
PRECISION
(
kFloat
)}});
...
...
@@ -32,9 +35,9 @@ void Run(const char* model_dir) {
valid_places
);
auto
*
input_tensor
=
predictor
.
GetInput
(
0
);
input_tensor
->
Resize
(
DDim
(
std
::
vector
<
DDim
::
value_type
>
({
3
,
224
,
224
})));
input_tensor
->
Resize
(
DDim
(
std
::
vector
<
DDim
::
value_type
>
({
1
,
3
,
224
,
224
})));
auto
*
data
=
input_tensor
->
mutable_data
<
float
>
();
for
(
int
i
=
0
;
i
<
3
*
224
*
224
;
i
++
)
{
for
(
int
i
=
0
;
i
<
input_tensor
->
dims
().
production
()
;
i
++
)
{
data
[
i
]
=
i
;
}
...
...
@@ -65,7 +68,7 @@ USE_LITE_OP(feed);
USE_LITE_OP
(
fetch
);
USE_LITE_OP
(
io_copy
);
USE_LITE_OP
(
con2d
);
USE_LITE_OP
(
con
v
2d
);
// USE_LITE_OP(batch_norm);
USE_LITE_OP
(
relu
);
USE_LITE_OP
(
depthwise_conv2d
);
...
...
@@ -81,10 +84,10 @@ USE_LITE_KERNEL(fc, kARM, kFloat, kNCHW, def);
USE_LITE_KERNEL
(
mul
,
kARM
,
kFloat
,
kNCHW
,
def
);
USE_LITE_KERNEL
(
scale
,
kARM
,
kFloat
,
kNCHW
,
def
);
USE_LITE_KERNEL
(
con2d
,
kARM
,
kFloat
,
kNCHW
,
def
);
USE_LITE_KERNEL
(
con
v
2d
,
kARM
,
kFloat
,
kNCHW
,
def
);
USE_LITE_KERNEL
(
batch_norm
,
kARM
,
kFloat
,
kNCHW
,
def
);
USE_LITE_KERNEL
(
relu
,
kARM
,
kFloat
,
kNCHW
,
def
);
USE_LITE_KERNEL
(
depthwise_con2d
,
kARM
,
kFloat
,
kNCHW
,
def
);
USE_LITE_KERNEL
(
depthwise_con
v
2d
,
kARM
,
kFloat
,
kNCHW
,
def
);
USE_LITE_KERNEL
(
pool2d
,
kARM
,
kFloat
,
kNCHW
,
def
);
USE_LITE_KERNEL
(
elementwise_add
,
kARM
,
kFloat
,
kNCHW
,
def
);
USE_LITE_KERNEL
(
softmax
,
kARM
,
kFloat
,
kNCHW
,
def
);
...
...
paddle/fluid/lite/core/optimizer.h
浏览文件 @
33437fe4
...
...
@@ -46,24 +46,24 @@ class Optimizer {
SpecifyKernelPickTactic
(
kernel_pick_factor
);
InitTargetTypeTransformPass
();
#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
//
#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
if
(
passes
.
empty
())
{
RunPasses
(
std
::
vector
<
std
::
string
>
{{
"static_kernel_pick_pass"
,
//
"variable_place_inference_pass"
,
//
"argument_type_display_pass"
,
//
"type_target_transform_pass"
,
//
"argument_type_display_pass"
,
//
"variable_place_inference_pass"
,
//
"argument_type_display_pass"
,
//
"io_copy_kernel_pick_pass"
,
//
"variable_place_inference_pass"
,
//
"runtime_context_assign_pass"
,
//
//
"static_kernel_pick_pass", //
//
"variable_place_inference_pass", //
//
"argument_type_display_pass", //
//
"type_target_transform_pass", //
//
"argument_type_display_pass", //
//
"variable_place_inference_pass", //
//
"argument_type_display_pass", //
//
"io_copy_kernel_pick_pass", //
//
"variable_place_inference_pass", //
"runtime_context_assign_pass"
,
//
}});
}
else
{
RunPasses
(
passes
);
}
#endif
//
#endif
exec_scope_
=
program
.
exec_scope
();
}
...
...
paddle/fluid/lite/kernels/arm/conv_compute.cc
浏览文件 @
33437fe4
...
...
@@ -102,7 +102,7 @@ REGISTER_LITE_KERNEL(conv2d, kARM, kFloat, kNCHW,
.
BindInput
(
"Input"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
BindInput
(
"Bias"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
BindInput
(
"Filter"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
BindOutput
(
"Out"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
BindOutput
(
"Out
put
"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
Finalize
();
REGISTER_LITE_KERNEL
(
depthwise_conv2d
,
kARM
,
kFloat
,
kNCHW
,
...
...
@@ -110,5 +110,5 @@ REGISTER_LITE_KERNEL(depthwise_conv2d, kARM, kFloat, kNCHW,
.
BindInput
(
"Input"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
BindInput
(
"Bias"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
BindInput
(
"Filter"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
BindOutput
(
"Out"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
BindOutput
(
"Out
put
"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
Finalize
();
paddle/fluid/lite/kernels/arm/elementwise_add_compute.cc
浏览文件 @
33437fe4
...
...
@@ -26,7 +26,7 @@ void ElementwiseAddCompute::Run() {
const
float
*
y_data
=
param
.
Y
->
data
<
float
>
();
float
*
out_data
=
param
.
Out
->
mutable_data
<
float
>
();
int
n
=
param
.
X
->
dims
().
production
();
lite
::
arm
::
math
::
elementwise_add
(
x_data
,
y_data
,
out_data
,
n
);
//
lite::arm::math::elementwise_add(x_data, y_data, out_data, n);
}
}
// namespace arm
...
...
paddle/fluid/lite/kernels/arm/pool_compute.cc
浏览文件 @
33437fe4
...
...
@@ -163,7 +163,7 @@ PrecisionType PoolCompute::precision() const { return PRECISION(kFloat); }
}
// namespace lite
}
// namespace paddle
REGISTER_LITE_KERNEL
(
pool
,
kARM
,
kFloat
,
kNCHW
,
REGISTER_LITE_KERNEL
(
pool
2d
,
kARM
,
kFloat
,
kNCHW
,
paddle
::
lite
::
kernels
::
arm
::
PoolCompute
,
def
)
.
BindInput
(
"X"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
BindOutput
(
"Out"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
...
...
paddle/fluid/lite/kernels/arm/relu_compute.h
浏览文件 @
33437fe4
...
...
@@ -45,4 +45,6 @@ class ReluCompute : public KernelLite<TARGET(kARM), PRECISION(kFloat)> {
REGISTER_LITE_KERNEL
(
relu
,
kARM
,
kFloat
,
kNCHW
,
paddle
::
lite
::
kernels
::
arm
::
ReluCompute
,
def
)
.
BindInput
(
"X"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
BindOutput
(
"Out"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kARM
))})
.
Finalize
();
paddle/fluid/lite/operators/conv_op.h
浏览文件 @
33437fe4
...
...
@@ -40,11 +40,11 @@ class ConvOpLite : public OpLite {
bool
AttachImpl
(
const
cpp
::
OpDesc
&
op_desc
,
lite
::
Scope
*
scope
)
override
{
auto
input
=
op_desc
.
Input
(
"Input"
).
front
();
auto
filter
=
op_desc
.
Input
(
"Filter"
).
front
();
auto
out
=
op_desc
.
Output
(
"O
ut"
).
front
();
auto
out
put
=
op_desc
.
Output
(
"Outp
ut"
).
front
();
param_
.
x
=
scope
->
FindVar
(
input
)
->
GetMutable
<
lite
::
Tensor
>
();
param_
.
filter
=
scope
->
FindVar
(
filter
)
->
GetMutable
<
lite
::
Tensor
>
();
CHECK
(
scope
->
FindVar
(
out
));
param_
.
output
=
scope
->
FindVar
(
out
)
->
GetMutable
<
lite
::
Tensor
>
();
CHECK
(
scope
->
FindVar
(
out
put
));
param_
.
output
=
scope
->
FindVar
(
out
put
)
->
GetMutable
<
lite
::
Tensor
>
();
param_
.
strides
=
op_desc
.
GetAttr
<
std
::
vector
<
int
>>
(
"strides"
);
param_
.
paddings
=
op_desc
.
GetAttr
<
std
::
vector
<
int
>>
(
"paddings"
);
param_
.
groups
=
op_desc
.
GetAttr
<
int
>
(
"groups"
);
...
...
@@ -53,19 +53,24 @@ class ConvOpLite : public OpLite {
std
::
vector
<
std
::
string
>
input_arg_names
=
op_desc
.
InputArgumentNames
();
if
(
std
::
find
(
input_arg_names
.
begin
(),
input_arg_names
.
end
(),
"Bias"
)
!=
input_arg_names
.
end
())
{
auto
bias_var
=
scope
->
FindVar
(
op_desc
.
Input
(
"Bias"
).
front
());
if
(
bias_var
!=
nullptr
)
{
param_
.
bias
=
const_cast
<
lite
::
Tensor
*>
(
&
(
bias_var
->
Get
<
lite
::
Tensor
>
()));
auto
bias_arguments
=
op_desc
.
Input
(
"Bias"
);
if
(
bias_arguments
.
size
()
>
0
)
{
auto
bias_var
=
scope
->
FindVar
(
bias_arguments
.
front
());
if
(
bias_var
!=
nullptr
)
{
param_
.
bias
=
const_cast
<
lite
::
Tensor
*>
(
&
(
bias_var
->
Get
<
lite
::
Tensor
>
()));
}
}
}
if
(
std
::
find
(
input_arg_names
.
begin
(),
input_arg_names
.
end
(),
"ResidualData"
)
!=
input_arg_names
.
end
())
{
auto
residual_data_var
=
scope
->
FindVar
(
op_desc
.
Input
(
"ResidualData"
).
front
());
if
(
residual_data_var
!=
nullptr
)
{
param_
.
residualData
=
const_cast
<
lite
::
Tensor
*>
(
&
(
residual_data_var
->
Get
<
lite
::
Tensor
>
()));
auto
res_data_arguments
=
op_desc
.
Input
(
"ResidualData"
);
if
(
res_data_arguments
.
size
()
>
0
)
{
auto
residual_data_var
=
scope
->
FindVar
(
res_data_arguments
.
front
());
if
(
residual_data_var
!=
nullptr
)
{
param_
.
residualData
=
const_cast
<
lite
::
Tensor
*>
(
&
(
residual_data_var
->
Get
<
lite
::
Tensor
>
()));
}
}
}
return
true
;
...
...
paddle/fluid/lite/operators/pool_op.h
浏览文件 @
33437fe4
...
...
@@ -53,10 +53,18 @@ class PoolOpLite : public OpLite {
param_
.
strides
=
op_desc
.
GetAttr
<
std
::
vector
<
int
>>
(
"strides"
);
param_
.
paddings
=
op_desc
.
GetAttr
<
std
::
vector
<
int
>>
(
"paddings"
);
param_
.
exclusive
=
op_desc
.
GetAttr
<
bool
>
(
"exclusive"
);
param_
.
adaptive
=
op_desc
.
GetAttr
<
bool
>
(
"adaptive"
);
param_
.
ceil_mode
=
op_desc
.
GetAttr
<
bool
>
(
"ceil_mode"
);
param_
.
use_quantizer
=
op_desc
.
GetAttr
<
bool
>
(
"use_quantizer"
);
if
(
op_desc
.
HasAttr
(
"exclusive"
))
{
param_
.
exclusive
=
op_desc
.
GetAttr
<
bool
>
(
"exclusive"
);
}
if
(
op_desc
.
HasAttr
(
"adaptive"
))
{
param_
.
adaptive
=
op_desc
.
GetAttr
<
bool
>
(
"adaptive"
);
}
if
(
op_desc
.
HasAttr
(
"ceil_mode"
))
{
param_
.
ceil_mode
=
op_desc
.
GetAttr
<
bool
>
(
"ceil_mode"
);
}
if
(
op_desc
.
HasAttr
(
"use_quantizer"
))
{
param_
.
use_quantizer
=
op_desc
.
GetAttr
<
bool
>
(
"use_quantizer"
);
}
// param_.data_format = op_desc.GetAttr<bool>("data_format");
return
true
;
}
...
...
paddle/fluid/lite/operators/relu_op.cc
浏览文件 @
33437fe4
...
...
@@ -32,12 +32,11 @@ bool ReluOp::InferShape() const {
bool
ReluOp
::
AttachImpl
(
const
cpp
::
OpDesc
&
opdesc
,
lite
::
Scope
*
scope
)
{
param_
.
input
=
const_cast
<
lite
::
Tensor
*>
(
&
scope
->
FindVar
(
opdesc
.
Input
(
"
Input
"
).
front
())
->
Get
<
lite
::
Tensor
>
());
&
scope
->
FindVar
(
opdesc
.
Input
(
"
X
"
).
front
())
->
Get
<
lite
::
Tensor
>
());
param_
.
output
=
scope
->
FindVar
(
opdesc
.
Output
(
"Out"
).
front
())
->
GetMutable
<
lite
::
Tensor
>
();
CHECK
(
param_
.
input
);
CHECK
(
param_
.
output
);
kernel_
->
SetParam
(
param_
);
return
true
;
}
...
...
paddle/fluid/lite/operators/split_op.cc
浏览文件 @
33437fe4
...
...
@@ -37,7 +37,7 @@ bool SplitOp::InferShape() const {
const
auto
&
sections
=
param_
.
sections
;
const
int
outs_number
=
outs
.
size
();
std
::
vector
<
lite
::
DDim
Hvy
>
outs_dims
;
std
::
vector
<
lite
::
DDim
>
outs_dims
;
outs_dims
.
reserve
(
outs_number
);
if
(
num
>
0
)
{
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录