PaddlePaddle / Paddle-Lite

Commit d6709eb9
Authored Dec 16, 2019 by cen.li
* resnet50 result ok
* test=develop
Parent: 1e160622
Showing 3 changed files with 49 additions and 63 deletions (+49 -63)
lite/kernels/bm/bridges/batch_norm_op.cc    +19 -22
lite/kernels/bm/bridges/elementwise_ops.cc   +8 -9
lite/kernels/bm/bridges/scale_op.cc         +22 -32
lite/kernels/bm/bridges/batch_norm_op.cc
@@ -43,6 +43,8 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
     i_x_shape_data[i] = static_cast<int>(x_shape_data[i]);
   }
+  int channel_size = x_dims[1];
+
   auto scale_var_name = op_info->Input("Scale").front();
   auto scale = scope->FindVar(scale_var_name)->GetMutable<lite::Tensor>();
@@ -68,31 +70,26 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
   auto epsilon = op_info->GetAttr<float>("epsilon");
   auto unique_bn_out_name = lite::bm::UniqueName("batch_norm_out");
-  add_batchnorm_layer(graph_ctx->bm_compiler_handle,
-                      const_cast<const int*>(i_x_shape_data),
-                      x_dims.size(),
-                      static_cast<const char*>(x_var_name.c_str()),
-                      const_cast<const int*>(i_output_shape_data),
-                      output_dims.size(),
-                      static_cast<const char*>(unique_bn_out_name.c_str()),
-                      static_cast<const char*>(unique_op_name.c_str()),
-                      static_cast<const float*>(mean->mutable_data<float>()),
-                      static_cast<const float*>(variance->mutable_data<float>()),
-                      1.f,
-                      epsilon,
-                      0,
-                      1);
+  auto* scale_data = scale->mutable_data<float>();
+  auto* bias_data = bias->mutable_data<float>();
+  auto* mean_data = mean->mutable_data<float>();
+  auto* variance_data = variance->mutable_data<float>();
+  for (int c = 0; c < channel_size; c++) {
+    float inv_scale = 1.f / (std::sqrt(variance_data[c] + epsilon));
+    bias_data[c] = bias_data[c] - inv_scale * scale_data[c] * mean_data[c];
+    scale_data[c] = inv_scale * scale_data[c];
+  }
   const int input_num = 1;
   int** shape = new int*[input_num];
   int* dim = new int[input_num];
   const char** name = new const char*[input_num];
-  name[0] = static_cast<const char*>(unique_bn_out_name.c_str());
-  dim[0] = output_dims.size();
-  shape[0] = i_output_shape_data;
-  auto unique_scale_name = lite::bm::UniqueName("scale");
+  name[0] = static_cast<const char*>(x_var_name.c_str());
+  dim[0] = x_dims.size();
+  shape[0] = i_x_shape_data;
   add_scale_layer(graph_ctx->bm_compiler_handle,
                   input_num,
                   shape,
@@ -101,12 +98,12 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
                   const_cast<const int*>(i_output_shape_data),
                   output_dims.size(),
                   static_cast<const char*>(output_var_name.c_str()),
-                  static_cast<const char*>(unique_scale_name.c_str()),
+                  static_cast<const char*>(unique_op_name.c_str()),
                   static_cast<const float*>(scale->mutable_data<float>()),
                   static_cast<const float*>(bias->mutable_data<float>()),
                   1,
                   1,
-                  0);
+                  1);
   delete[] shape;
   delete[] name;
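Note on the batch_norm change: instead of emitting a dedicated layer through add_batchnorm_layer, the converter now folds the normalization statistics into the scale layer on the host side. At inference time batch norm is a per-channel affine transform, so (writing gamma/beta for the Scale/Bias tensors):

    y = gamma * (x - mean) / sqrt(variance + epsilon) + beta
      = (gamma / sqrt(variance + epsilon)) * x + (beta - gamma * mean / sqrt(variance + epsilon))

The new loop over channel_size rewrites scale_data and bias_data in place to these folded coefficients (with inv_scale = 1 / sqrt(variance + epsilon)), which is why add_scale_layer can now take its input directly from x_var_name rather than from the intermediate unique_bn_out_name node.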
lite/kernels/bm/bridges/elementwise_ops.cc
@@ -15,6 +15,7 @@
 #include "lite/kernels/bm/bridges/registry.h"
 #include "bmcompiler_if.h"
 #include "bmcompiler_if_lite.h"
+#include "bmcompiler_defs.h"
 
 namespace paddle {
 namespace lite {
@@ -50,7 +51,6 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwi
   shape[0] = i_x_shape_data;
   auto y_var_name = op_info->Input("Y").front();
   auto y = scope->FindVar(y_var_name)->GetMutable<lite::Tensor>();
   auto y_dims = y->dims();
   name[1] = static_cast<const char*>(y_var_name.c_str());
@@ -61,7 +61,6 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwi
     i_y_shape_data[i] = static_cast<int>(y_shape_data[i]);
   }
   shape[1] = i_y_shape_data;
   bool y_is_const = input_nodes.find(y_var_name) == input_nodes.end();
   // output
@@ -105,25 +104,25 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwi
                         coeff);
   } else {
     const float* y_data = const_cast<const float*>(y->mutable_data<float>());
-    const float* x_data = const_cast<const float*>(x->mutable_data<float>());
     bm_add_const_tensor(graph_ctx->bm_compiler_handle,
-                        name[0],
+                        name[1],
                         shape[0],
                         dim[0],
-                        static_cast<bm_data_type_t>(0),
+                        static_cast<bm_data_type_t>(DTYPE_FP32),
                         static_cast<const void*>(y_data));
     add_binary_layer_v2(graph_ctx->bm_compiler_handle,
                         name[0],
                         shape[0],
                         dim[0],
                         0,
-                        static_cast<const float*>(x_data),
-                        name[0],
+                        nullptr,
+                        name[1],
                         shape[0],
                         dim[0],
                         0,
-                        static_cast<const float*>(y_data),
+                        nullptr,
                         static_cast<const char*>(output_var_name.c_str()),
                         0);
   }
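Note on the elementwise change: when Y is not produced by another node in the graph (y_is_const), it holds weight data, so it is now registered with the BM compiler as a named FP32 constant and both operands of the binary layer are resolved by name; the raw x_data/y_data pointers previously passed to add_binary_layer_v2 become nullptr. A minimal sketch of the resulting call pattern, where handle and output_name are shorthand for graph_ctx->bm_compiler_handle and output_var_name.c_str() above:

    // Declare Y once as a typed constant tensor under its own name ...
    bm_add_const_tensor(handle, name[1], shape[0], dim[0],
                        static_cast<bm_data_type_t>(DTYPE_FP32),
                        static_cast<const void*>(y_data));
    // ... then wire the binary layer by name only; no inline data pointers.
    add_binary_layer_v2(handle,
                        name[0], shape[0], dim[0], 0, nullptr,  // X, a graph node
                        name[1], shape[0], dim[0], 0, nullptr,  // Y, the constant
                        output_name, 0);                        // op code 0, as in the diff

This also corrects the old wiring, which registered the constant under name[0] and then fed name[0] to both inputs of the binary layer.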
lite/kernels/bm/bridges/scale_op.cc
@@ -15,6 +15,7 @@
 #include "lite/kernels/bm/bridges/registry.h"
 #include "lite/backends/bm/builder.h"
 #include "bmcompiler_if.h"
+#include "bmcompiler_op_code.h"
 
 namespace paddle {
 namespace lite {
@@ -34,59 +35,48 @@ node_map_type ScaleConverter(const std::shared_ptr<lite::OpLite> scale_op,
   auto unique_op_name = lite::bm::UniqueName(op_type);
   // input
-  const int input_num = 1;
-  int** shape = new int*[input_num];
-  int* dim = new int[input_num];
-  const char** name = new const char*[input_num];
   auto x_var_name = op_info->Input("X").front();
   auto x = scope->FindVar(x_var_name)->GetMutable<lite::Tensor>();
   auto x_dims = x->dims();
-  name[0] = static_cast<const char*>(x_var_name.c_str());
-  dim[0] = x_dims.size();
   const long int* x_shape_data = const_cast<const long int*>(&x_dims.data()[0]);
   int i_x_shape_data[x_dims.size()];
   for (size_t i = 0; i < x_dims.size(); i++) {
     i_x_shape_data[i] = static_cast<int>(x_shape_data[i]);
   }
-  shape[0] = i_x_shape_data;
   // output
   auto output_var_name = op_info->Output("Out").front();
-  auto output = scope->FindVar(output_var_name)->GetMutable<lite::Tensor>();
-  auto output_dims = output->dims();
-  const long int* output_shape_data = const_cast<const long int*>(&output_dims.data()[0]);
-  int i_output_shape_data[output_dims.size()];
-  for (size_t i = 0; i < output_dims.size(); i++) {
-    i_output_shape_data[i] = static_cast<int>(output_shape_data[i]);
-  }
   auto scale = op_info->GetAttr<float>("scale");
   auto bias = op_info->GetAttr<float>("bias");
   auto bias_after_scale = op_info->GetAttr<bool>("bias_after_scale");
-  if (bias_after_scale) {
+  if (!bias_after_scale) {
     bias *= scale;
   }
-  add_scale_layer(graph_ctx->bm_compiler_handle,
-                  input_num,
-                  shape,
-                  dim,
-                  name,
-                  const_cast<const int*>(i_output_shape_data),
-                  output_dims.size(),
-                  static_cast<const char*>(output_var_name.c_str()),
-                  static_cast<const char*>(unique_op_name.c_str()),
-                  &scale,
-                  &bias,
-                  1,
-                  1,
-                  0);
-  delete[] shape;
-  delete[] dim;
-  delete[] name;
+  auto unique_op_scale_name = lite::bm::UniqueName(op_type);
+  add_const_binary_layer(graph_ctx->bm_compiler_handle,
+                         static_cast<const char*>(x_var_name.c_str()),
+                         const_cast<const int*>(i_x_shape_data),
+                         x_dims.size(),
+                         scale,
+                         static_cast<const char*>(unique_op_scale_name.c_str()),
+                         BINARY_MUL,
+                         0);
+  add_const_binary_layer(graph_ctx->bm_compiler_handle,
+                         static_cast<const char*>(unique_op_scale_name.c_str()),
+                         const_cast<const int*>(i_x_shape_data),
+                         x_dims.size(),
+                         bias,
+                         static_cast<const char*>(output_var_name.c_str()),
+                         BINARY_ADD,
+                         0);
   output_nodes[output_var_name] = output_var_name;
   return output_nodes;
 }
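Note on the scale change: Paddle's scale op computes out = scale * x + bias when bias_after_scale is true, and out = scale * (x + bias) otherwise. The converter now lowers it as two constant binary layers, a BINARY_MUL by scale into the temporary unique_op_scale_name node followed by a BINARY_ADD of bias into the output, instead of a single add_scale_layer. Because the multiply is always applied first, the bias must be pre-scaled exactly when bias_after_scale is false:

    bias_after_scale == true:   out = scale * x + bias
    bias_after_scale == false:  out = scale * (x + bias) = scale * x + (scale * bias)

hence the corrected condition if (!bias_after_scale) { bias *= scale; }; the old code scaled the bias in the opposite (wrong) branch.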