Commit efafc72f (unverified) — PaddlePaddle/Paddle
Authored Sep 18, 2018 by Wu Yi; committed via GitHub on Sep 18, 2018
Hide program APIs (#12315)
* hide program APIs
* fix merge error
* update
Parent: c9e5c1e4
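
This commit moves seven methods of `paddle.fluid.Program` behind a leading underscore, Python's convention for internal API: `create_block`, `rollback`, `optimized_guard`, `prune`, `inference_optimize`, `get_desc`, and `copy_data_info_from`. All in-tree callers are updated below; for out-of-tree code the migration is a pure rename. A minimal sketch of the block push/pop pair under the new names (assuming a `paddle.fluid` build that includes this commit):

    import paddle.fluid as fluid

    prog = fluid.Program()
    # Before this commit:  prog.create_block() ... prog.rollback()
    # After it, the same calls are spelled as internal methods:
    sub = prog._create_block()             # push a new block, make it current
    prog._rollback()                       # pop back to the parent block
    assert prog.current_block().idx == 0   # back at the global block
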
Showing 14 changed files with 46 additions and 53 deletions (+46, -53)
  paddle/fluid/API.spec                                              +0  -7
  python/paddle/fluid/backward.py                                    +2  -2
  python/paddle/fluid/clip.py                                        +2  -2
  python/paddle/fluid/concurrency.py                                 +2  -2
  python/paddle/fluid/framework.py                                   +11 -11
  python/paddle/fluid/io.py                                          +4  -4
  python/paddle/fluid/layers/control_flow.py                         +2  -2
  python/paddle/fluid/layers/io.py                                   +2  -2
  python/paddle/fluid/optimizer.py                                   +4  -4
  python/paddle/fluid/regularizer.py                                 +1  -1
  python/paddle/fluid/tests/unittests/test_operator_desc.py          +1  -1
  python/paddle/fluid/tests/unittests/test_program.py                +7  -7
  python/paddle/fluid/transpiler/distribute_transpiler.py            +7  -7
  python/paddle/fluid/transpiler/memory_optimization_transpiler.py   +1  -1
paddle/fluid/API.spec
 paddle.fluid.Program.__init__ ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.block ArgSpec(args=['self', 'index'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.clone ArgSpec(args=['self', 'for_test'], varargs=None, keywords=None, defaults=(False,))
-paddle.fluid.Program.copy_data_info_from ArgSpec(args=['self', 'other'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.create_block ArgSpec(args=['self', 'parent_idx'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.Program.current_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.get_desc ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.global_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.inference_optimize ArgSpec(args=['self', 'export_for_deployment'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.Program.list_vars ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.optimized_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.Program.parse_from_string ArgSpec(args=['binary_str'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.prune ArgSpec(args=['self', 'targets'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.rollback ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.to_string ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.Operator.__init__ ArgSpec(args=['self', 'block', 'desc', 'type', 'inputs', 'outputs', 'attrs'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.Operator.all_attrs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
...
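
For context, `paddle/fluid/API.spec` pins the public Python surface: each line records the inspected argument spec of one exported symbol, so hiding a method means deleting its line. A hedged sketch of how such a line is derived (assuming a Python version where `inspect.getargspec` is still available, matching this 2018-era format; the `clone` stand-in below is hypothetical):

    import inspect

    def clone(self, for_test=False):
        """Hypothetical stand-in with the same signature as Program.clone."""

    # Prints: ArgSpec(args=['self', 'for_test'], varargs=None,
    #                 keywords=None, defaults=(False,))
    print(inspect.getargspec(clone))
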
python/paddle/fluid/backward.py
...
@@ -347,7 +347,7 @@ def _append_backward_ops_(block,
         # If the op has its own sub-block, deal with the sub-block first
         if op.has_attr("sub_block"):
             sub_block = program.block(op.block_attr_id("sub_block"))
-            grad_sub_block = program.create_block()
+            grad_sub_block = program._create_block()
             grad_sub_block._set_forward_block_idx(sub_block.idx)
             cb = _callback_lookup_(op)
             if cb is not None:
...
@@ -361,7 +361,7 @@ def _append_backward_ops_(block,
                 _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block,
                                       no_grad_dict, grad_to_var, callbacks)
-            program.rollback()
+            program._rollback()
             grad_sub_block_list.append(grad_sub_block.desc)
         # Getting op's corresponding grad_op
...
python/paddle/fluid/clip.py
...
@@ -331,7 +331,7 @@ def append_gradient_clip_ops(param_grads):
     for p, g in param_grads:
         if g is None:
             continue
-        with p.block.program.optimized_guard([p, g]):
+        with p.block.program._optimized_guard([p, g]):
             clip_attr = getattr(p, 'gradient_clip_attr', NullGradientClipAttr())
             if clip_attr is None:
                 clip_attr = NullGradientClipAttr()
...
@@ -346,7 +346,7 @@ def append_gradient_clip_ops(param_grads):
     for p, g in param_grads:
         if g is None:
             continue
-        with p.block.program.optimized_guard([p, g]):
+        with p.block.program._optimized_guard([p, g]):
             res.append(clip_attr._create_operators(param=p, grad=g))

     return res
...
python/paddle/fluid/concurrency.py
...
@@ -126,7 +126,7 @@ class SelectCase(object):
         self.channel = channel

     def __enter__(self):
-        self.block = self.main_program.create_block()
+        self.block = self.main_program._create_block()

     def construct_op(self):
         main_program = self.helper.main_program
...
@@ -187,7 +187,7 @@ class SelectCase(object):
                    if self.value else '')

     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.main_program.rollback()
+        self.main_program._rollback()
         if exc_type is not None:
             return False  # re-raise exception
         return True
...
python/paddle/fluid/framework.py
...
@@ -935,7 +935,7 @@ class Block(object):
     Notes:
         The constructor of Block should not be invoked directly. Please
-        use `Program.create_block()` to create a block.
+        use `Program._create_block()` to create a block.

     Examples:
         .. code-block:: python
...
@@ -1483,7 +1483,7 @@ class Program(object):
             self._op_role_var = [var_name]

     @contextlib.contextmanager
-    def optimized_guard(self, param_and_grads):
+    def _optimized_guard(self, param_and_grads):
         """
         A with guard to set :code:`Optimization` :code:`OpRole` and
         :code:`OpRoleVar` automatically.
...
@@ -1496,7 +1496,7 @@ class Program(object):
         Examples:

             >>> p, g = backward(...)
-            >>> with program.optimized_guard([p,g]):
+            >>> with program._optimized_guard([p,g]):
             >>>     p = p - 0.001 * g
         """
         OpRole = core.op_proto_and_checker_maker.OpRole
...
@@ -1554,7 +1554,7 @@ class Program(object):
         res_str = _debug_string_(proto, throw_on_error)
         return res_str

-    def get_desc(self):
+    def _get_desc(self):
         """
         Get the C++ side of `ProgramDesc` object pointer. The C++ object is
         exposed by :code:`pybind`.
...
@@ -1647,7 +1647,7 @@ class Program(object):
         The two code snippets above will generate same programs.
         """
         if for_test:
-            p = self.inference_optimize(export_for_deployment=False)
+            p = self._inference_optimize(export_for_deployment=False)
         else:
             p = Program()
             p.current_block_idx = self.current_block_idx
...
@@ -1663,10 +1663,10 @@ class Program(object):
             p._sync_with_cpp()

         p._copy_param_info_from(self)
-        p.copy_data_info_from(self)
+        p._copy_data_info_from(self)
         return p

-    def prune(self, targets):
+    def _prune(self, targets):
         """
         Prune operators and variables which are not needed to generate
         :code:`targets`.
...
@@ -1717,7 +1717,7 @@ class Program(object):
         res._sync_with_cpp()
         return res

-    def inference_optimize(self, export_for_deployment=True):
+    def _inference_optimize(self, export_for_deployment=True):
         """
         This method will create a new program and do following adjustments on it:
         1. Remove all reader variables and their creator ops if exist.
...
@@ -1841,7 +1841,7 @@ class Program(object):
         """
         return self.blocks[self.current_block_idx]

-    def create_block(self, parent_idx=None):
+    def _create_block(self, parent_idx=None):
         """
         Create a new block with the :code:`parent_idx` and change the current block
         to new block.
...
@@ -1860,7 +1860,7 @@ class Program(object):
         self.blocks.append(Block(self, self.current_block_idx))
         return self.current_block()

-    def rollback(self):
+    def _rollback(self):
         """
         Exit a code block, i.e., roll back to the parent block.

         Returns:
...
@@ -1906,7 +1906,7 @@ class Program(object):
                 "program, with represent the same topology")
         self.global_block()._copy_param_info_from(other.global_block())

-    def copy_data_info_from(self, other):
+    def _copy_data_info_from(self, other):
         """
         Copy the information of data variables from other program.
...
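
The `_optimized_guard` rename is the one most visible to downstream code, since the optimizer, regularizer, and gradient-clip passes all enter it. A self-contained sketch of the pattern its docstring describes, using a simplified stand-in class (this is not Paddle's actual implementation, which routes through `core.op_proto_and_checker_maker.OpRole`):

    import contextlib

    class ProgramSketch(object):
        """Simplified stand-in for Program, modeling only the role fields."""

        def __init__(self):
            self._op_role = "Forward"
            self._op_role_var = []

        @contextlib.contextmanager
        def _optimized_guard(self, param_and_grads):
            self._op_role = "Optimize"
            self._op_role_var = list(param_and_grads)
            try:
                yield  # ops appended inside the guard pick up these attributes
            finally:
                self._op_role = "Forward"
                self._op_role_var = []

    prog = ProgramSketch()
    with prog._optimized_guard(["w", "w@GRAD"]):
        assert prog._op_role == "Optimize"
    assert prog._op_role == "Forward"  # role restored on exit
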
python/paddle/fluid/io.py
...
@@ -515,8 +515,8 @@ def get_inference_program(target_vars, main_program=None):
             vars.extend(var.metrics)
         else:
             vars.append(var)
-    pruned_program = main_program.prune(targets=vars)
-    inference_program = pruned_program.inference_optimize()
+    pruned_program = main_program._prune(targets=vars)
+    inference_program = pruned_program._inference_optimize()
     return inference_program
...
@@ -644,8 +644,8 @@ def save_inference_model(dirname,
                 global_block._remove_op(i)
         copy_program.desc.flush()

-        pruned_program = copy_program.prune(targets=target_vars)
-        inference_program = pruned_program.inference_optimize(
+        pruned_program = copy_program._prune(targets=target_vars)
+        inference_program = pruned_program._inference_optimize(
             export_for_deployment=export_for_deployment)
         fetch_var_names = [v.name for v in target_vars]
...
python/paddle/fluid/layers/control_flow.py
...
@@ -217,10 +217,10 @@ class BlockGuard(object):
         self.main_program = main_program

     def __enter__(self):
-        self.main_program.create_block()
+        self.main_program._create_block()

     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.main_program.rollback()
+        self.main_program._rollback()
         if exc_type is not None:
             return False  # re-raise exception
         return True
...
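
BlockGuard is why the rename stays invisible to end users: the control-flow layers built on it push and pop blocks only through this context manager rather than calling the Program methods directly. A self-contained sketch of that pairing (the stub class and names are hypothetical, not Paddle code):

    class ProgramStub(object):
        """Hypothetical stub exposing the two hooks BlockGuard needs."""

        def __init__(self):
            self.depth = 0  # how many sub-blocks deep we currently are

        def _create_block(self, parent_idx=None):
            self.depth += 1

        def _rollback(self):
            self.depth -= 1

    class BlockGuardSketch(object):
        """Pairs a block push with a guaranteed pop, mirroring BlockGuard."""

        def __init__(self, program):
            self.program = program

        def __enter__(self):
            self.program._create_block()

        def __exit__(self, exc_type, exc_val, exc_tb):
            self.program._rollback()
            return exc_type is None  # propagate exceptions, like the original

    prog = ProgramStub()
    with BlockGuardSketch(prog):
        assert prog.depth == 1  # inside the new sub-block
    assert prog.depth == 0      # rolled back to the parent on exit
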
python/paddle/fluid/layers/io.py
...
@@ -1008,9 +1008,9 @@ class Preprocessor(object):
     @contextlib.contextmanager
     def block(self):
         self.status = Preprocessor.IN_SUB_BLOCK
-        self.sub_block = self.main_prog.create_block()
+        self.sub_block = self.main_prog._create_block()
         yield
-        self.main_prog.rollback()
+        self.main_prog._rollback()
         self.status = Preprocessor.AFTER_SUB_BLOCK
         if not self._is_completed():
             raise RuntimeError(
...
python/paddle/fluid/optimizer.py
...
@@ -236,7 +236,7 @@ class Optimizer(object):
         for param_and_grad in parameters_and_grads:
             if param_and_grad[1] is None:
                 continue
-            with param_and_grad[0].block.program.optimized_guard(
+            with param_and_grad[0].block.program._optimized_guard(
                     param_and_grad), name_scope("optimizer"):
                 if param_and_grad[0].trainable is True:
                     optimize_op = self._append_optimize_op(loss.block,
...
@@ -580,7 +580,7 @@ class AdamOptimizer(Optimizer):
         for param, grad in param_and_grads:
             if grad is None:
                 continue
-            with param.block.program.optimized_guard([param, grad]):
+            with param.block.program._optimized_guard([param, grad]):
                 beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                                       param)
                 beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
...
@@ -709,7 +709,7 @@ class AdamaxOptimizer(Optimizer):
         for param, grad in parameters_and_grads:
             if grad is None:
                 continue
-            with param.block.program.optimized_guard([param, grad]):
+            with param.block.program._optimized_guard([param, grad]):
                 beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                                       param)
                 main_block.append_op(
...
@@ -1198,7 +1198,7 @@ class ModelAverage(Optimizer):
         for param, grad in self.params_grads:
             if grad is None:
                 continue
-            with param.block.program.optimized_guard([param, grad]):
+            with param.block.program._optimized_guard([param, grad]):
                 self._append_average_accumulate_op(param)

         self.apply_program = Program()
...
python/paddle/fluid/regularizer.py
...
@@ -47,7 +47,7 @@ def append_regularization_ops(parameters_and_grads, regularization=None):
         if grad is None:
             params_and_grads.append((param, grad))
             continue

-        with param.block.program.optimized_guard([param, grad]):
+        with param.block.program._optimized_guard([param, grad]):
             regularization_term = None
             if param.regularizer is not None:
                 # Add variable for regularization term in grad block
...
python/paddle/fluid/tests/unittests/test_operator_desc.py
...
@@ -26,7 +26,7 @@ main_program = default_startup_program()
 class TestOperator(unittest.TestCase):
     def test_error_type(self):
-        block = main_program.create_block()
+        block = main_program._create_block()
         try:
             block.append_op()
             self.assertFail()
...
python/paddle/fluid/tests/unittests/test_program.py
...
@@ -28,25 +28,25 @@ class TestProgram(unittest.TestCase):
         self.assertEqual(-1, b.parent_idx)
         self.assertEqual(0, b.idx)

-        b = main_program.create_block()
+        b = main_program._create_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)

-        b = main_program.create_block()
+        b = main_program._create_block()
         self.assertEqual(2, b.idx)
         self.assertEqual(1, b.parent_idx)

-        main_program.rollback()
+        main_program._rollback()

         b = main_program.current_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)

-        b = main_program.create_block()
+        b = main_program._create_block()
         self.assertEqual(3, b.idx)
         self.assertEqual(1, b.parent_idx)

-        main_program.rollback()
+        main_program._rollback()

         b = main_program.current_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)
...
@@ -120,8 +120,8 @@ class TestProgram(unittest.TestCase):
         main_program = fluid.Program()
         with fluid.program_guard(main_program, startup_program):
             net()
-        no_read_program = main_program.inference_optimize()
-        keep_read_program = main_program.inference_optimize(
+        no_read_program = main_program._inference_optimize()
+        keep_read_program = main_program._inference_optimize(
             export_for_deployment=False)
         no_read_ops = no_read_program.global_block().ops
         keep_read_ops = keep_read_program.global_block().ops
...
python/paddle/fluid/transpiler/distribute_transpiler.py
...
@@ -580,7 +580,7 @@ class DistributeTranspiler(object):
         assert isinstance(origin_block, Block)
         # we put the new sub block to new block to follow the block
         # hierarchy of the original blocks
-        new_sub_block = program.create_block(lr_block.idx)
+        new_sub_block = program._create_block(lr_block.idx)

         # clone vars
         for var in origin_block.vars:
...
@@ -600,7 +600,7 @@ class DistributeTranspiler(object):
         # record optimize blocks and we can run them on pserver parallel
         optimize_blocks = []
         if len(lr_ops) > 0:
-            lr_decay_block = pserver_program.create_block(
+            lr_decay_block = pserver_program._create_block(
                 pserver_program.num_blocks - 1)
             optimize_blocks.append(lr_decay_block)
             for _, op in enumerate(lr_ops):
...
@@ -613,7 +613,7 @@ class DistributeTranspiler(object):
         grad_to_block_id = []
         pre_block_idx = pserver_program.num_blocks - 1
         for idx, opt_op in enumerate(opt_op_on_pserver):
-            per_opt_block = pserver_program.create_block(pre_block_idx)
+            per_opt_block = pserver_program._create_block(pre_block_idx)
             optimize_blocks.append(per_opt_block)
             # append grad merging ops before clip and weight decay
             # cases may like:
...
@@ -636,7 +636,7 @@ class DistributeTranspiler(object):
         grad_to_block_id = list(set(grad_to_block_id))
         # append global ops
         if global_ops:
-            opt_state_block = pserver_program.create_block(
+            opt_state_block = pserver_program._create_block(
                 pserver_program.num_blocks - 1)
             optimize_blocks.append(opt_state_block)
             for glb_op in global_ops:
...
@@ -1073,7 +1073,7 @@ class DistributeTranspiler(object):
         table_var = pserver_program.global_block().vars[self.table_name]
         prefetch_var_name_to_block_id = []
         for index in range(len(self.all_prefetch_input_vars)):
-            prefetch_block = pserver_program.create_block(optimize_block.idx)
+            prefetch_block = pserver_program._create_block(optimize_block.idx)
             trainer_ids = self.all_prefetch_input_vars[index][pserver_index]
             pserver_ids = pserver_program.global_block().create_var(
                 name=trainer_ids.name,
...
@@ -1131,7 +1131,7 @@ class DistributeTranspiler(object):
             if 'Param' in op.input_names and
             op.input("Param")[0] == self.table_name
         ][0]
-        table_opt_block = pserver_program.create_block(pre_block_idx)
+        table_opt_block = pserver_program._create_block(pre_block_idx)

         if self.sync_mode:
             # create grad vars in pserver program
...
@@ -1194,7 +1194,7 @@ class DistributeTranspiler(object):
             persistable=True,
             type=core.VarDesc.VarType.RAW)

-        checkpoint_save_block = pserver_program.create_block(pre_block_idx)
+        checkpoint_save_block = pserver_program._create_block(pre_block_idx)
         # this 'file_path' do not be used in save lookup table variable
         checkpoint_save_block.append_op(
             type='save',
...
python/paddle/fluid/transpiler/memory_optimization_transpiler.py
...
@@ -357,7 +357,7 @@ def _get_cfgs(input_program):
     :return: A list of ControlFlowGraph, each corresponds to a block.
     """
     ops_list = []
-    pdesc = input_program.get_desc()
+    pdesc = input_program._get_desc()
     block_desc = pdesc.block(0)
     op_size = block_desc.op_size()
...
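
Since `_get_desc()` hands back the pybind-wrapped C++ `ProgramDesc`, the transpiler can walk blocks and ops below the Python layer. A small usage sketch (assuming a `paddle.fluid` build from this commit's era; the calls mirror `_get_cfgs` above):

    import paddle.fluid as fluid

    prog = fluid.Program()
    pdesc = prog._get_desc()   # pybind-wrapped C++ ProgramDesc
    block0 = pdesc.block(0)    # BlockDesc of the global block
    print(block0.op_size())    # 0: a fresh program holds no ops yet
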