BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Unverified commit 897789b1, authored Jan 31, 2019 by Yan Chunwei, committed via GitHub on Jan 31, 2019.
fix save_inferece_model bug (#15365)
Parent: ba02ac46

Showing 6 changed files with 131 additions and 2 deletions (+131 −2):
paddle/fluid/framework/ir/CMakeLists.txt                            +1   -0
paddle/fluid/framework/ir/identity_scale_op_clean_pass.cc          +80   -0
paddle/fluid/framework/ir/identity_scale_op_clean_pass.h           +33   -0
paddle/fluid/inference/api/paddle_pass_builder.h                    +2   -0
python/paddle/fluid/io.py                                          +13   -1
python/paddle/fluid/tests/unittests/test_inference_model_io.py      +2   -1
paddle/fluid/framework/ir/CMakeLists.txt

@@ -65,6 +65,7 @@ pass_library(conv_elementwise_add2_act_fuse_pass inference)
 pass_library(conv_elementwise_add_fuse_pass inference)
 pass_library(conv_affine_channel_fuse_pass inference)
 pass_library(transpose_flatten_concat_fuse_pass inference)
+pass_library(identity_scale_op_clean_pass base)
 # There may be many transpose-flatten structures in a model, and the output of
 # these structures will be used as inputs to the concat Op. This pattern will
paddle/fluid/framework/ir/identity_scale_op_clean_pass.cc (new file, mode 100644)

// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/ir/identity_scale_op_clean_pass.h"
#include <string>
#include "paddle/fluid/framework/ir/graph_pattern_detector.h"

namespace paddle {
namespace framework {
namespace ir {

std::unique_ptr<ir::Graph> IdentityScaleOpCleanPass::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  FusePassBase::Init("identity_scale_op_clean", graph.get());

  // pre_op -> scale_in -> scale_op -> scale_out
  // ->
  // pre_op -> scale_out
  GraphPatternDetector detector;
  auto pre_op = detector.mutable_pattern()->NewNode("pre_op")->assert_is_op();
  auto scale_in = detector.mutable_pattern()
                      ->NewNode("scale_in")
                      ->assert_is_op_input("scale")
                      ->AsIntermediate();
  auto scale_op = detector.mutable_pattern()
                      ->NewNode("scale_fuse")
                      ->assert_is_op("scale")
                      ->assert_op_attr<float>("scale", 1.)
                      ->assert_op_attr<float>("bias", 0.);
  auto scale_out = detector.mutable_pattern()
                       ->NewNode("scale_out")
                       ->assert_is_op_output("scale");

  pre_op->LinksTo({scale_in});
  scale_op->LinksFrom({scale_in}).LinksTo({scale_out});

  GraphPatternDetector::handle_t handler = [&](
      const GraphPatternDetector::subgraph_t& subgraph, Graph* graph) {
    Node* scale_op_var = subgraph.at(scale_op);
    Node* scale_in_var = subgraph.at(scale_in);
    Node* scale_out_var = subgraph.at(scale_out);
    Node* pre_op_var = subgraph.at(pre_op);
    // Link pre_op directly to scale_out
    const std::string scale_in_name = scale_in_var->Name();
    const std::string scale_out_name = scale_out_var->Name();
    // Remove links in graph
    GraphSafeRemoveNodes(graph, {scale_in_var, scale_op_var});
    // Modify proto message
    auto* pre_op_desc = pre_op_var->Op();
    for (auto& parameter : *pre_op_desc->Proto()->mutable_outputs()) {
      auto* arguments = parameter.mutable_arguments();
      auto it = std::find(arguments->begin(), arguments->end(), scale_in_name);
      PADDLE_ENFORCE(it != arguments->end());
      *it = scale_out_name;
    }

    IR_NODE_LINK_TO(pre_op_var, scale_out_var);
  };

  detector(graph.get(), handler);
  return graph;
}

}  // namespace ir
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(identity_scale_op_clean_pass,
              paddle::framework::ir::IdentityScaleOpCleanPass);
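The pattern above matches a scale op whose scale attribute is 1 and bias is 0, i.e. an identity, and rewires pre_op directly to scale_out. As a point of reference, here is a minimal Python sketch (assuming the fluid 1.x API of this era; the program and variable names are illustrative and not part of the commit) of how such an identity scale op arises in a program:

# A minimal sketch, assuming the fluid 1.x API; names here are illustrative.
import paddle.fluid as fluid

main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    # layers.scale(var, 1.) appends a "scale" op with scale=1.0 and bias=0.0,
    # which is exactly the identity pattern this pass detects and removes.
    y = fluid.layers.scale(x, scale=1.0)

for op in main.global_block().ops:
    if op.type == 'scale':
        print(op.attr('scale'), op.attr('bias'))  # expected: 1.0 0.0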
paddle/fluid/framework/ir/identity_scale_op_clean_pass.h (new file, mode 100644)

// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/fluid/framework/ir/fuse_pass_base.h"

namespace paddle {
namespace framework {
namespace ir {

class IdentityScaleOpCleanPass : public FusePassBase {
 protected:
  std::unique_ptr<ir::Graph> ApplyImpl(std::unique_ptr<ir::Graph> graph) const;

 private:
  virtual ~IdentityScaleOpCleanPass() = default;
};

}  // namespace ir
}  // namespace framework
}  // namespace paddle
paddle/fluid/inference/api/paddle_pass_builder.h

@@ -117,6 +117,7 @@ class CpuPassStrategy : public PassStrategy {
         "conv_bn_fuse_pass",             //
         "conv_eltwiseadd_bn_fuse_pass",  //
         "is_test_pass",                  //
+        "identity_scale_op_clean_pass",  //
     });
     use_gpu_ = false;
   }
@@ -155,6 +156,7 @@ class GpuPassStrategy : public PassStrategy {
   GpuPassStrategy() : PassStrategy({}) {
     passes_.assign({
         "infer_clean_graph_pass",                    //
+        "identity_scale_op_clean_pass",              //
         "conv_affine_channel_fuse_pass",             //
         "conv_eltwiseadd_affine_channel_fuse_pass",  //
         "conv_bn_fuse_pass",                         //
python/paddle/fluid/io.py

@@ -21,9 +21,10 @@ import shutil
 import six
 from functools import reduce
 
+from paddle.fluid import layers
 from paddle.fluid.executor import Executor
 from paddle.fluid.evaluator import Evaluator
-from paddle.fluid.framework import Program, Parameter, default_main_program, default_startup_program, Variable
+from paddle.fluid.framework import Program, Parameter, default_main_program, default_startup_program, Variable, program_guard
 from . import core
 
 __all__ = [
@@ -931,6 +932,17 @@ def save_inference_model(dirname,
     if main_program is None:
         main_program = default_main_program()
 
+    # fix the bug that the activation op's output as target will be pruned.
+    # will affect the inference performance.
+    # TODO(Superjomn) add an IR pass to remove 1-scale op.
+    with program_guard(main_program):
+        uniq_target_vars = []
+        for var in target_vars:
+            if isinstance(var, Variable):
+                var1 = layers.scale(var, 1.)
+            uniq_target_vars.append(var1)
+        target_vars = uniq_target_vars
+
     # when a pserver and a trainer running on the same machine, mkdir may conflict
     try:
         os.makedirs(dirname)
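For reference, a minimal sketch of the call path this change affects, assuming the fluid 1.x API of this era; the toy network and the './infer_model' directory are illustrative and not taken from the commit:

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[13], dtype='float32')
# An activation op's output used directly as a fetch target -- the case that
# could previously be pruned away when the program was trimmed for inference.
y = fluid.layers.fc(input=x, size=1, act='relu')

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

# With this patch, each target var is wrapped in layers.scale(var, 1.) under a
# program_guard before pruning, so the target survives; the new
# identity_scale_op_clean_pass strips the 1-scale op again at inference time.
fluid.io.save_inference_model(
    dirname='./infer_model', feeded_var_names=['x'], target_vars=[y], executor=exe)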
python/paddle/fluid/tests/unittests/test_inference_model_io.py

@@ -82,7 +82,8 @@ class TestBook(unittest.TestCase):
         self.assertEqual(feed_var_names, ["x", "y"])
         self.assertEqual(len(fetch_vars), 1)
-        self.assertEqual(str(fetch_vars[0]), str(avg_cost))
+        print("fetch %s" % str(fetch_vars[0]))
+        self.assertTrue("scale" in str(fetch_vars[0]))
         self.assertEqual(expected, actual)
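The updated assertion reflects the wrapping added in io.py: the fetch target loaded back from disk is now the scale-wrapped copy of the original output. A minimal sketch of loading such a model, assuming the hypothetical './infer_model' directory from the earlier sketch:

import paddle.fluid as fluid

exe = fluid.Executor(fluid.CPUPlace())
# './infer_model' is the hypothetical directory written by the save sketch above.
inference_program, feed_names, fetch_vars = fluid.io.load_inference_model(
    dirname='./infer_model', executor=exe)
print(feed_names)          # e.g. ['x']
print(str(fetch_vars[0]))  # contains "scale", as the updated test asserts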