机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 21a80312
Authored Jun 15, 2019 by hong19860320
change pool to pool2d to fix the bug of pooling unit test, and refine code
test=develop
Parent: b69262cf
Showing 7 changed files with 79 additions and 81 deletions (+79, -81)
paddle/fluid/lite/api/cxx_api_bin.cc                 +0   -4
paddle/fluid/lite/core/mir/passes.h                  +2   -0
paddle/fluid/lite/core/optimizer.h                   +11  -10
paddle/fluid/lite/operators/batch_norm_op.cc         +1   -2
paddle/fluid/lite/operators/batch_norm_op_test.cc    +63  -63
paddle/fluid/lite/operators/pool_op.h                +1   -1
paddle/fluid/lite/operators/pool_op_test.cc          +1   -1
paddle/fluid/lite/api/cxx_api_bin.cc
@@ -13,11 +13,7 @@
 // limitations under the License.

 #include "paddle/fluid/lite/api/cxx_api.h"
 // #ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
 #include "paddle/fluid/lite/core/mir/passes.h"
 // #endif
 #include "paddle/fluid/lite/core/op_registry.h"

 namespace paddle {
paddle/fluid/lite/core/mir/passes.h
@@ -21,6 +21,7 @@ namespace mir {}  // namespace mir
 }  // namespace lite
 }  // namespace paddle

 #ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
 USE_MIR_PASS(demo);
 USE_MIR_PASS(static_kernel_pick_pass);
 USE_MIR_PASS(variable_place_inference_pass);

@@ -28,4 +29,5 @@ USE_MIR_PASS(type_target_transform_pass);
 USE_MIR_PASS(generate_program_pass);
 USE_MIR_PASS(io_copy_kernel_pick_pass);
 USE_MIR_PASS(argument_type_display_pass);
 #endif
 USE_MIR_PASS(runtime_context_assign_pass);
paddle/fluid/lite/core/optimizer.h
@@ -46,18 +46,19 @@ class Optimizer {
     SpecifyKernelPickTactic(kernel_pick_factor);
     InitTargetTypeTransformPass();
-    // #ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
     if (passes.empty()) {
       RunPasses(std::vector<std::string>{{
-          // "static_kernel_pick_pass",        //
-          // "variable_place_inference_pass",  //
-          // "argument_type_display_pass",     //
-          // "type_target_transform_pass",     //
-          // "argument_type_display_pass",     //
-          // "variable_place_inference_pass",  //
-          // "argument_type_display_pass",     //
-          // "io_copy_kernel_pick_pass",       //
-          // "variable_place_inference_pass",  //
+#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
+          "static_kernel_pick_pass",        //
+          "variable_place_inference_pass",  //
+          "argument_type_display_pass",     //
+          "type_target_transform_pass",     //
+          "argument_type_display_pass",     //
+          "variable_place_inference_pass",  //
+          "argument_type_display_pass",     //
+          "io_copy_kernel_pick_pass",       //
+          "variable_place_inference_pass",  //
+#endif
           "runtime_context_assign_pass",  //
       }});
     } else {
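The effect of the guard above is easiest to see in isolation: the preprocessor drops list entries before the vector is ever built, so a light-weight build runs only runtime_context_assign_pass. The sketch below is a standalone illustration, not Paddle code; only the LITE_WITH_LIGHT_WEIGHT_FRAMEWORK macro name and the pass names are taken from the patch.

// Standalone sketch: conditional entries in a brace-initialized pass list.
// Build normally to print all three names; build with
// -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK to keep only the last one.
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> passes{
#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
      "static_kernel_pick_pass",
      "variable_place_inference_pass",
#endif
      "runtime_context_assign_pass",
  };
  for (const auto& name : passes) {
    std::cout << name << "\n";
  }
  return 0;
}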
paddle/fluid/lite/operators/batch_norm_op.cc
@@ -82,8 +82,7 @@ bool BatchNormOp::AttachImpl(const cpp::OpDesc &op_desc, lite::Scope *scope) {
   param_.variance =
       scope->FindVar(op_desc.Input("Variance").front())->GetMutable<Tensor>();
   param_.y = scope->FindVar(op_desc.Output("Y").front())->GetMutable<Tensor>();
-  param_.is_test = true;
-  // TODO(hong19860320) param_.is_test = op_desc.GetAttr<int>("is_test");
+  param_.is_test = op_desc.GetAttr<int>("is_test");
   param_.use_global_stats = op_desc.GetAttr<bool>("use_global_stats");
   if (!param_.is_test) {
     param_.mean_out =
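Read together with the test changes below, the point of this hunk is that "is_test" travels through the op desc as an int, so the writer and the reader have to agree on that type. The following is a standalone sketch of that idea using std::variant; it is not Paddle's cpp::OpDesc, and the attribute store here is hypothetical.

// Standalone sketch (hypothetical attribute store, not cpp::OpDesc): a typed
// attribute map only yields the value when it is read with the same type it
// was written with, which is why the test now writes "is_test" as
// static_cast<int>(1) and AttachImpl reads it with GetAttr<int>.
#include <cassert>
#include <map>
#include <string>
#include <variant>

int main() {
  std::map<std::string, std::variant<bool, int, float>> attrs;
  attrs["is_test"] = static_cast<int>(1);  // written as int, as the test now does
  assert(std::holds_alternative<int>(attrs["is_test"]));    // int read succeeds
  assert(!std::holds_alternative<bool>(attrs["is_test"]));  // a bool read would not
  const bool is_test = std::get<int>(attrs["is_test"]) != 0;
  assert(is_test);
  return 0;
}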
paddle/fluid/lite/operators/batch_norm_op_test.cc
@@ -46,7 +46,7 @@ TEST(batch_norm_op_lite, test) {
   desc.SetInput("Mean", {"mean"});
   desc.SetInput("Variance", {"variance"});
   desc.SetOutput("Y", {"y"});
-  desc.SetAttr("is_test", true);
+  desc.SetAttr("is_test", static_cast<int>(1));
   desc.SetAttr("use_global_stats", false);
   desc.SetAttr("epsilon", 1e-5f);
   desc.SetAttr("momentum", 0.9f);

@@ -67,72 +67,72 @@ TEST(batch_norm_op_lite, test) {
(This hunk deletes the previously commented-out copy of the test below, in which every line was prefixed with "//" and the op desc set desc.SetAttr("is_test", false), and adds the enabled version:)

TEST(batch_norm_op_lite, test_enable_is_test) {
  // prepare variables
  Scope scope;
  auto* x = scope.Var("x")->GetMutable<Tensor>();
  auto* scale = scope.Var("scale")->GetMutable<Tensor>();
  auto* bias = scope.Var("bias")->GetMutable<Tensor>();
  auto* mean = scope.Var("mean")->GetMutable<Tensor>();
  auto* variance = scope.Var("variance")->GetMutable<Tensor>();
  auto* y = scope.Var("y")->GetMutable<Tensor>();
  auto* mean_out = scope.Var("mean_out")->GetMutable<Tensor>();
  auto* variance_out = scope.Var("variance_out")->GetMutable<Tensor>();
  auto* saved_mean = scope.Var("saved_mean")->GetMutable<Tensor>();
  auto* saved_variance = scope.Var("saved_variance")->GetMutable<Tensor>();
  x->Resize({2, 32, 10, 20});
  auto x_dims = x->dims();
  const int64_t channel_size = x_dims[1];  // NCHW
  scale->Resize({channel_size});
  bias->Resize({channel_size});
  mean->Resize({channel_size});
  variance->Resize({channel_size});

  // prepare op desc
  cpp::OpDesc desc;
  desc.SetType("batch_norm");
  desc.SetInput("X", {"x"});
  desc.SetInput("Scale", {"scale"});
  desc.SetInput("Bias", {"bias"});
  desc.SetInput("Mean", {"mean"});
  desc.SetInput("Variance", {"variance"});
  desc.SetOutput("Y", {"y"});
  desc.SetOutput("MeanOut", {"mean_out"});
  desc.SetOutput("VarianceOut", {"variance_out"});
  desc.SetOutput("SavedMean", {"saved_mean"});
  desc.SetOutput("SavedVariance", {"saved_variance"});
  desc.SetAttr("is_test", static_cast<int>(0));
  desc.SetAttr("use_global_stats", false);
  desc.SetAttr("epsilon", 1e-5f);
  desc.SetAttr("momentum", 0.9f);
  desc.SetAttr("data_layout", std::string("NCHW"));

  BatchNormOp batch_norm("batch_norm");
  batch_norm.SetValidPlaces({Place{TARGET(kHost), PRECISION(kFloat)}});
  batch_norm.Attach(desc, &scope);
  batch_norm.CheckShape();
  batch_norm.InferShape();

  // check output dims
  auto y_dims = y->dims();
  CHECK_EQ(y_dims.size(), x_dims.size());
  for (size_t i = 0; i < y_dims.size(); i++) {
    CHECK_EQ(y_dims[i], x_dims[i]);
  }
  auto mean_out_dims = mean_out->dims();
  auto variance_out_dims = variance_out->dims();
  auto saved_mean_dims = saved_mean->dims();
  auto saved_variance_dims = saved_variance->dims();
  CHECK_EQ(mean_out_dims.size(), 1UL);
  CHECK_EQ(variance_out_dims.size(), 1UL);
  CHECK_EQ(saved_mean_dims.size(), 1UL);
  CHECK_EQ(saved_variance_dims.size(), 1UL);
  CHECK_EQ(mean_out_dims[0], channel_size);
  CHECK_EQ(variance_out_dims[0], channel_size);
  CHECK_EQ(saved_mean_dims[0], channel_size);
  CHECK_EQ(saved_variance_dims[0], channel_size);
}

}  // namespace operators
}  // namespace lite
paddle/fluid/lite/operators/pool_op.h
@@ -71,7 +71,7 @@ class PoolOpLite : public OpLite {
   void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }

-  std::string DebugString() const override { return "pool"; }
+  std::string DebugString() const override { return "pool2d"; }

  private:
   mutable PoolParam param_;
paddle/fluid/lite/operators/pool_op_test.cc
@@ -38,7 +38,7 @@ TEST(pool_op_lite, test) {
   // prepare op desc
   cpp::OpDesc desc;
-  desc.SetType("pool");
+  desc.SetType("pool2d");
   desc.SetInput("X", {"x"});
   desc.SetOutput("Out", {"output"});
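The renames above ("pool" to "pool2d" in both DebugString and the test's op desc) line the test up with the type string the op is presumably registered under, which is what the commit message calls the pooling unit-test bug. Below is a standalone sketch of that failure mode using a hypothetical registry, not Paddle's op registry.

// Hypothetical registry keyed by op-type string (not Paddle's OpLite
// registry): looking up "pool" fails when the creator was registered as
// "pool2d", so the desc and the registration must use the same name.
#include <functional>
#include <iostream>
#include <map>
#include <string>

struct FakeOp {
  std::string type;
};

int main() {
  std::map<std::string, std::function<FakeOp()>> registry;
  registry["pool2d"] = [] { return FakeOp{"pool2d"}; };  // registered name

  for (const char* type : {"pool", "pool2d"}) {
    const bool found = registry.find(type) != registry.end();
    std::cout << type << ": " << (found ? "found" : "not found") << "\n";
  }
  return 0;
}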