PaddlePaddle / Paddle, commit b94f7848 (unverified)

Authored by Yu Yang on Jun 20, 2018; committed via GitHub on Jun 20, 2018.
Merge pull request #11531 from reyoung/feature/non_layer_api_doc
Polish Non-Layer API
Parents: 8d5ab1f9, d1203e38
Showing 8 changed files with 545 additions and 93 deletions (+545, -93):
python/paddle/fluid/__init__.py (+2, -1)
python/paddle/fluid/executor.py (+20, -0)
python/paddle/fluid/framework.py (+281, -43)
python/paddle/fluid/lod_tensor.py (+48, -33)
python/paddle/fluid/recordio_writer.py (+50, -0)
python/paddle/fluid/trainer.py (+133, -15)
python/paddle/fluid/transpiler/memory_optimization_transpiler.py (+10, -0)
python/paddle/fluid/unique_name.py (+1, -1)
python/paddle/fluid/__init__.py

@@ -44,7 +44,7 @@ import metrics
 import transpiler
 from param_attr import ParamAttr, WeightNormParamAttr
 from data_feeder import DataFeeder
-from core import LoDTensor, CPUPlace, CUDAPlace, CUDAPinnedPlace
+from core import LoDTensor, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope
 from transpiler import DistributeTranspiler, InferenceTranspiler, \
     memory_optimize, release_memory
 from concurrency import (Go, make_channel, channel_send, channel_recv,

@@ -83,6 +83,7 @@ __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + \
     'profiler',
     'unique_name',
     'recordio_writer',
+    'Scope',
 ]
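The only functional change above is re-exporting :code:`Scope` at the package root. A minimal usage sketch, assuming a Fluid build that includes this commit:

    import paddle.fluid as fluid

    # Scope is now importable from the package root instead of fluid.core.
    new_scope = fluid.Scope()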
python/paddle/fluid/executor.py

@@ -25,6 +25,13 @@ g_scope = core.Scope()

 def global_scope():
+    """
+    Get the global/default scope instance. There are a lot of APIs use
+    :code:`global_scope` as its default value, e.g., :code:`Executor.run`
+
+    Returns:
+        Scope: The global/default scope instance.
+    """
     return g_scope

@@ -37,6 +44,19 @@ def switch_scope(scope):

 @contextlib.contextmanager
 def scope_guard(scope):
+    """
+    Change the global/default scope instance by Python `with` statement. All
+    variable in runtime will assigned to the new scope.
+
+    Examples:
+        >>> import paddle.fluid as fluid
+        >>> new_scope = fluid.Scope()
+        >>> with fluid.scope_guard(new_scope):
+        >>>     ...
+
+    Args:
+        scope: The new global/default scope.
+    """
     ex = switch_scope(scope)
     yield
     switch_scope(ex)
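The :code:`scope_guard` docstring implies a pattern worth spelling out: swap the global scope for the duration of a `with` block, then let `switch_scope(ex)` restore the old one. A short sketch, assuming :code:`global_scope` and :code:`scope_guard` are re-exported at the package root, as the `__init__.py` change suggests:

    import paddle.fluid as fluid

    new_scope = fluid.Scope()
    with fluid.scope_guard(new_scope):
        # Inside the guard, APIs that default to global_scope(), such as
        # Executor.run, read and write variables in new_scope instead.
        assert fluid.global_scope() is new_scope
    # On exit the previous global scope is restored by switch_scope(ex).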
python/paddle/fluid/framework.py

@@ -30,8 +30,6 @@ __all__ = [
     'default_startup_program',
     'default_main_program',
     'program_guard',
-    'switch_startup_program',
-    'switch_main_program',
     'get_var',
 ]

@@ -1238,23 +1236,18 @@ class Program(object):
     Notes: we have default_startup_program and default_main_program
     by default, a pair of them will shared the parameters.
     The default_startup_program only run once to initialize parameters,
-    default_main_program run in every minibatch and adjust the weights.
-
-    Args:
-        None
+    default_main_program run in every mini batch and adjust the weights.

     Returns:
-        Python Program
+        A empty program.

     Examples:
-        .. code-block:: python
-
-            main_program = Program()
-            startup_program = Program()
-            with fluid.program_guard(main_program=main_program, startup_program=startup_program):
-                fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
-                fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
-                fluid.layers.fc(name="fc", shape=[10], dtype='float32', act="relu")
+        >>> main_program = fluid.Program()
+        >>> startup_program = fluid.Program()
+        >>> with fluid.program_guard(main_program=main_program, startup_program=startup_program):
+        >>>     fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
+        >>>     fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
+        >>>     fluid.layers.fc(name="fc", shape=[10], dtype='float32', act="relu")

     """

@@ -1268,6 +1261,19 @@ class Program(object):
     @property
     def op_role(self):
+        """
+        The operator role. In a enum {Forward, Backward, Optimize}.
+
+        Notes: this is a low level API. It is used only for ParallelExecutor to
+        duplicate or schedule operator to devices.
+
+        For example, the forward operator should be executed on every device.
+        The backward operator should be executed on every device and the
+        parameter gradient of backward (use :code:`op_role_var` to get this
+        variable) operator should be merged to one device. The optimization
+        operators should be executed on only one device and broadcast the
+        optimization result, i.e., the new parameter, to every other device.
+        """
         return self._current_role

     @op_role.setter

@@ -1276,6 +1282,13 @@ class Program(object):
     @property
     def op_role_var(self):
+        """
+        The auxiliary variables for :code:`op_role` property.
+
+        See Also: :code:`Program.op_role`'s documentation for details.
+
+        Notes: This is a very low-level API. Users should not use it directly.
+        """
         return self._op_role_var

     @op_role_var.setter

@@ -1284,6 +1297,21 @@ class Program(object):
     @contextlib.contextmanager
     def optimized_guard(self, var):
+        """
+        A with guard to set :code:`Optimization` :code:`OpRole` and
+        :code:`OpRoleVar` automatically.
+
+        Notes: This is a very low level API. Users should not use it directly.
+
+        Args:
+            var(Variable|str): The variable (name) to be optimized.
+
+        Examples:
+            >>> p, g = backward(...)
+            >>> with program.optimized_guard(p):
+            >>>     p = p - 0.001 * g
+        """
         OpRole = core.op_proto_and_checker_maker.OpRole
         self._current_role = OpRole.Optimize
         self._op_role_var = [var.name if isinstance(var, Variable) else var]

@@ -1292,18 +1320,35 @@ class Program(object):
         self._current_role = OpRole.Forward

     def __str__(self):
+        """
+        Get the protobuf debug string of this Program.
+
+        Returns:
+            (str): The protobuf debug string.
+
+        Raises:
+            ValueError: If any of required fields is not set.
+        """
         return self.to_string(True)

     def to_string(self, throw_on_error, with_details=False):
         """
         To debug string.
+
         Args:
-            throw_on_error(bool): raise exception when self is not initialized
-                when throw_on_error is True
-            with_details(bool): more details about variables and parameters
-                (e.g. trainable, optimize_attr, ...) will be printed when with_details is True
-        Returns(str): The debug string.
+            throw_on_error(bool): raise Value error when any of required fields
+                is not set.
+
+            with_details(bool): True if more details about variables and
+                parameters, e.g., :code:`trainable`, :code:`optimize_attr`, need
+                to print.
+
+        Returns
+            (str): The debug string.
+
+        Raises:
+            ValueError: If any of required fields is not set and throw_on_error is
+                True.
+
         """
         assert isinstance(throw_on_error, bool) and isinstance(with_details,

@@ -1319,25 +1364,89 @@ class Program(object):
         return res_str

     def get_desc(self):
+        """
+        Get the C++ side of `ProgramDesc` object pointer. The C++ object is
+        exposed by :code:`pybind`.
+
+        Notes: This is a very low level API. Users should not use this API
+        directly.
+        """
         return self.desc

     def clone(self, for_test=False):
         """
-        Clone the Program object
-
-        Args:
-            for_test(bool): indicate whether clone for test.
+        Create a new, duplicated program.

-        Set for_test to False when we want to clone the program for training.
-        Set for_test to True when we want to clone the program for testing.
+        Some operators, e.g., :code:`batch_norm`, behave differently between
+        training and testing. They have an attribute, :code:`is_test`, to
+        control this behaviour. This method will change the :code:`is_test`
+        attribute of them to :code:`True` when :code:`for_test=True`.
+
+        * Set for_test to False when we want to clone the program for training.
+        * Set for_test to True when we want to clone the program for testing.
+
+        Notes: This API DOES NOT prune any operator. Use
+        :code:`clone(for_test=True)` before backward and optimization please.

         Args:
-            for_test(bool): Some operators, such as batch_norm and drop_out ops,
-                behave differently in training and testing. If for_test is True,
-                the is_test attributes in these operators will be set to True for
-                testing purposes, otherwise, they remain unchanged.
+            for_test(bool): True if change the :code:`is_test` attribute of
+                operators to :code:`True`.

         Returns:
-            Program: The cloned Program object.
+            Program: The new, duplicated Program object.
+
+        Examples:
+
+            1. To clone a test program, the sample code is:
+
+            >>> import paddle.fluid as fluid
+            >>> train_program = fluid.Program()
+            >>> startup_program = fluid.Program()
+            >>> with fluid.program_guard(train_program, startup_program):
+            >>>     img = fluid.layers.data(name='image', shape=[784])
+            >>>     hidden = fluid.layers.fc(input=img, size=200, act='relu')
+            >>>     hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
+            >>>     loss = fluid.layers.cross_entropy(
+            >>>         input=fluid.layers.fc(hidden, size=10, act='softmax'),
+            >>>         label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
+            >>>
+            >>> test_program = train_program.clone(for_test=True)
+            >>>
+            >>> sgd = fluid.optimizer.SGD(learning_rate=1e-3)
+            >>> with fluid.program_guard(train_program, startup_program):
+            >>>     sgd.minimize(loss)
+
+            2. The :code:`clone` method can be avoid if you create program for
+            training and program for testing individually.
+
+            >>> import paddle.fluid as fluid
+            >>>
+            >>> def network(is_test):
+            >>>     img = fluid.layers.data(name='image', shape=[784])
+            >>>     hidden = fluid.layers.fc(input=img, size=200, act='relu')
+            >>>     hidden = fluid.layers.dropout(hidden, dropout_prob=0.5, is_test=is_test)
+            >>>     loss = fluid.layers.cross_entropy(
+            >>>         input=fluid.layers.fc(hidden, size=10, act='softmax'),
+            >>>         label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
+            >>>     return loss
+            >>>
+            >>> train_program = fluid.Program()
+            >>> startup_program = fluid.Program()
+            >>> test_program = fluid.Program()
+            >>>
+            >>> with fluid.program_guard(train_program, startup_program):
+            >>>     with fluid.unique_name.guard():
+            >>>         loss = network(is_test=False)
+            >>>         sgd = fluid.optimizer.SGD(learning_rate=1e-3)
+            >>>         sgd.minimize(loss)
+            >>>
+            >>> # the test startup program is not used.
+            >>> with fluid.program_guard(test_program, fluid.Program()):
+            >>>     with fluid.unique_name.guard():
+            >>>         loss = network(is_test=True)
+
+            The two code snippets above will generate same programs.
         """
         if for_test:
             p = self.inference_optimize()

@@ -1352,6 +1461,21 @@ class Program(object):
         return p

     def prune(self, targets):
+        """
+        Prune operators and variables which are not needed to generate
+        :code:`targets`.
+
+        Notes: This is a very low level API. Users should not use this API
+        directly. This API is in flux and not stable.
+
+        Args:
+            targets(list|Variable|Operator): A list of variables or operators
+                need to be pruned
+
+        Returns:
+            Program: A new, pruned program.
+        """
         if not isinstance(targets, list):
             targets = [targets]
         targets_idx = []

@@ -1386,6 +1510,17 @@ class Program(object):
         return res

     def inference_optimize(self):
+        """
+        This method will create a new program and change the :code:`is_test`
+        attribute of operators to :code:`True`. All the :code:`Parameter`
+        information will be lost.
+
+        Notes: This API is a very low level API. Use
+        :code:`Program.clone(for_test=True)` instead.
+
+        Returns:
+            Program: The new program.
+        """
         # this is an alternative implement before
         # core.inference_optimize being fixed.
         res = Program()

@@ -1402,6 +1537,18 @@ class Program(object):
     @staticmethod
     def parse_from_string(binary_str):
+        """
+        Deserialize a program desc from protobuf binary string.
+
+        Notes: All information about parameters will be lost after serialization
+        and deserialization.
+
+        Args:
+            binary_str(str): The binary prootbuf string.
+
+        Returns:
+            Program: A deserialized program desc.
+        """
         p = Program()
         p.desc = core.ProgramDesc(binary_str)
         p.blocks = [Block(p, i) for i in xrange(p.desc.num_blocks())]

@@ -1410,10 +1557,19 @@ class Program(object):
     @property
     def random_seed(self):
+        """
+        The default random seed for random operators in Program. Zero means get
+        the random seed from random device.
+
+        Notes: It must be set before the operators have been added.
+        """
         return self._seed

     @property
     def num_blocks(self):
+        """
+        The number of blocks in this program.
+        """
         return self.desc.num_blocks()

     @random_seed.setter

@@ -1426,15 +1582,40 @@ class Program(object):
         return str(self)

     def global_block(self):
+        """
+        Get the first block of this program.
+        """
         return self.blocks[0]

     def block(self, index):
+        """
+        Get the :code:`index` block of this program
+
+        Args:
+            index(int): The index of block to get
+
+        Returns:
+            Block: The :code:`index` block
+        """
         return self.blocks[index]

     def current_block(self):
+        """
+        Get the current block. The :code:`current` block is the block to append
+        operators.
+        """
         return self.blocks[self.current_block_idx]

     def create_block(self, parent_idx=None):
+        """
+        Create a new block with the :code:`parent_idx` and change the current block
+        to new block.
+
+        Args:
+            parent_idx(int): The parent block index.
+
+        Returns:
+            Block: The new block.
+        """
         new_block_idx = len(self.blocks)
         parent = self.current_block() if parent_idx is None else self.block(
             parent_idx)

@@ -1444,9 +1625,24 @@ class Program(object):
         return self.current_block()

     def rollback(self):
+        """
+        Exit a code block, i.e., roll back to the parent block.
+
+        Returns:
+            None
+        """
         self.current_block_idx = self.current_block().parent_idx

     def sync_with_cpp(self):
+        """
+        Synchronize Python instance to its binding C++ object instance.
+        If the program is modified in C++ space, this method should be invoked.
+
+        Notes: This is a very low level API. Users should not invoke it
+        directly.
+
+        Returns:
+            None
+        """
         for block_idx in range(len(self.blocks), self.desc.num_blocks()):
             self.blocks.append(Block(self, block_idx))
         for block in self.blocks:

@@ -1456,6 +1652,9 @@ class Program(object):
         """
         Copy the information of parameters from other program.
+
+        Notes: This is a very low level API. Users should not invoke it
+        directly.

         Args:
             other(Program): Other program

@@ -1475,6 +1674,9 @@ class Program(object):
         """
         Copy the information of data variables from other program.
+
+        Notes: This is a very low level API. Users should not invoke it
+        directly.

         Args:
             other(Program): Other program

@@ -1493,6 +1695,12 @@ class Program(object):
         self.global_block().var(var.name).is_data = True

     def list_vars(self):
+        """
+        Get all variables from this Program. A iterable object is returned.
+
+        Returns:
+            iterable: The generator will yield every variable in this program.
+        """
         for each_block in self.blocks:
             for each_var in each_block.vars.itervalues():
                 yield each_var

@@ -1584,8 +1792,15 @@ _startup_program_ = Program()
 def default_startup_program():
     """
-    Get default startup program. In startup program, Paddle will initialize
-    parameters, initialize nccl handle, etc.
+    Get default/global startup program.
+
+    The layer function in :code:`fluid.layers` will create parameters, readers,
+    NCCL handles as global variables. The :code:`startup_program` will
+    initialize them by the operators in startup program. The layer function will
+    append these initialization operators into startup program.
+
+    This method will return the :code:`default` or the :code:`current` startup
+    program. Users can use :code:`fluid.program_guard` to switch program.

     Returns:
         Program: startup program

@@ -1595,7 +1810,15 @@ def default_startup_program():
 def default_main_program():
     """
-    Get default main program. The main program is used for training or testing.
+    Get default/global main program. The main program is used for training or
+    testing.
+
+    All layer function in :code:`fluid.layers` will append operators and
+    variables to the :code:`default_main_program`.
+
+    The :code:`default_main_program` is the default program in a lot of APIs.
+    For example, the :code:`Executor.run()` will execute the
+    :code:`default_main_program` when the program is not specified.

     Returns:
         Program: main program

@@ -1637,20 +1860,34 @@ def switch_startup_program(program):
 @contextlib.contextmanager
 def program_guard(main_program, startup_program=None):
     """
-    Switch program with `with` statement
+    Change the global main program and startup program with `with` statement.
+    Layer functions in the Python `with` block will append operators and
+    variables to the new main programs.

     Examples:
-        >>> with program_guard(Program()):
-        >>>     data = fluid.layers.data(...)
-        >>>     hidden = fluid.layers.fc(...)
+        >>> import paddle.fluid as fluid
+        >>> main_program = fluid.Program()
+        >>> startup_program = fluid.Program()
+        >>> with fluid.program_guard(main_program, startup_program):
+        >>>     data = fluid.layers.data(...)
+        >>>     hidden = fluid.layers.fc(...)
+
+    Notes: The temporary :code:`Program` can be used if the user does not need
+    to construct either of startup program or main program.
+
+    Examples:
+        >>> import paddle.fluid as fluid
+        >>> main_program = fluid.Program()
+        >>> # does not care about startup program. Just pass a temporary value.
+        >>> with fluid.program_guard(main_program, fluid.Program()):
+        >>>     data = ...

     Args:
-        main_program(Program): New main program inside `with` statement
+        main_program(Program): New main program inside `with` statement.
         startup_program(Program): New startup program inside `with` statement.
             None means do not change startup program.
-
-    Returns:
-        None
     """
     if not isinstance(main_program, Program):
         raise TypeError("main_program should be Program")

@@ -1667,7 +1904,8 @@ def program_guard(main_program, startup_program=None):
 def get_var(name, program=None):
     """
-    Get a variable by name from the global block of a program
+    Get a variable by name from the global block of a program.
+
     Args:
         name(str): name of the variable
         program(Program|None): program object.
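Putting the new :code:`clone` documentation together, the intended call order is: build the forward program, clone it with :code:`for_test=True`, and only then append optimization ops, since clone does not prune. A runnable sketch along the lines of the docstring example, assuming the 0.14-era layer APIs shown in the diff:

    import paddle.fluid as fluid

    train_program = fluid.Program()
    startup_program = fluid.Program()
    with fluid.program_guard(train_program, startup_program):
        img = fluid.layers.data(name='image', shape=[784])
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        hidden = fluid.layers.fc(input=img, size=200, act='relu')
        hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
        prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
        loss = fluid.layers.mean(
            fluid.layers.cross_entropy(input=prediction, label=label))

    # Clone before minimize(): clone does not prune backward/optimize ops,
    # so cloning afterwards would drag them into the test program.
    test_program = train_program.clone(for_test=True)

    with fluid.program_guard(train_program, startup_program):
        sgd = fluid.optimizer.SGD(learning_rate=1e-3)
        sgd.minimize(loss)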
python/paddle/fluid/lod_tensor.py

@@ -19,33 +19,41 @@ __all__ = ['create_lod_tensor', 'create_random_int_lodtensor']

 def create_lod_tensor(data, lod, place):
-    """Create a lod tensor from a numpy array, a list, or an existing lod tensor.
+    """
+    Create a lod tensor from a numpy array, a list, or an existing lod tensor.

     Create a lod tensor by doing the following:
+
     1. Check that the length-based input lod is valid.
+
     2. Convert the length-based lod to a offset-based LoD.
+
     3. Copy the data from a numpy array, a list or a existing lod tensor to
        CPU or GPU device (based on input place).
+
     4. Set the level of detail (LoD) using the offset-based LoD.

-    Use example:
-        Suppose we want LoDTensor to hold data for sequences of word, where each word is
-        represented by an integer. If we want to create a LoDTensor to represent two
-        sentences, one of 2 words, and one of 3 words.
-        Then 'data' can be a numpy array of integers with shape (5, 1).
-        'lod' will be [[2, 3]], indicating the length(# of words) in each sentence.
-        This length-based input lod [[2, 3]] will be converted to offset-based lod [[0, 2, 5]]
-        inside the function call.
-        Please refer to
-        github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/lod_tensor.md
-        for more details regarding LoD.
+    Examples:
+
+        Suppose we want LoDTensor to hold data for sequences of word, where each
+        word is represented by an integer. If we want to create a LoDTensor to
+        represent two sentences, one of 2 words, and one of 3 words.
+
+        Then :code:`data` can be a numpy array of integers with shape (5, 1).
+        :code:`lod` will be [[2, 3]], indicating the length(# of words) in each
+        sentence. This length-based input lod [[2, 3]] will be converted to
+        offset-based lod [[0, 2, 5]] inside the function call.
+
+        Please reference :ref:`api_guide_low_level_lod_tensor` for more details
+        regarding LoD.

     Args:
-        data: a numpy array or a LoDTensor or a list holding the data to be copied.
-        lod: a list of lists indicating the length-based LoD info specified by the user.
-        place: CPU or GPU place indicating where the data in the new LoDTensor will be stored.
+        data(numpy.ndarray|list|LoDTensor): a numpy array or a LoDTensor or a
+            list holding the data to be copied.
+        lod(list): a list of lists indicating the length-based LoD info
+            specified by the user.
+        place(Place): CPU or GPU place indicating where the data in the new
+            LoDTensor will be stored.

     Returns:
         A fluid LoDTensor object with tensor data and lod info.

@@ -77,31 +85,38 @@ def create_lod_tensor(data, lod, place):

 def create_random_int_lodtensor(lod, base_shape, place, low, high):
-    """Create a LoDTensor containing random integers.
+    """
+    Create a LoDTensor containing random integers.

-    This function is frequently used in the book examples. So we revised it based on
-    the new create_lod_tensor API and put it here in the lod_tensor module to simplify
-    the code.
+    This function is frequently used in the book examples. So we revised it
+    based on the new create_lod_tensor API and put it here in the lod_tensor
+    module to simplify the code.

     The function does the following:
-    1. Calculate the overall shape of the LoDTensor based on the length-based 'lod' input
-    and the shape of the basic element in 'base_shape'.
+
+    1. Calculate the overall shape of the LoDTensor based on the length-based
+       :code:`lod` input and the shape of the basic element in
+       :code:`base_shape`.
+
     2. Create a numpy array of this shape.
+
     3. Create the LoDTensor using create_lod_tensor API.

-    Suppose we want LoDTensor to hold data for sequences of word, where each word is
-    represented by an integer. If we want to create a LoDTensor to represent two
-    sentences, one of 2 words, and one of 3 words. Then 'base_shape' is [1], input
-    length-based 'lod' is [[2, 3]]. Then the overall shape of the LoDTensor would be
-    [5, 1], holding 5 words for two sentences.
+    Suppose we want LoDTensor to hold data for sequences of word, where each
+    word is represented by an integer. If we want to create a LoDTensor to
+    represent two sentences, one of 2 words, and one of 3 words. Then
+    'base_shape' is [1], input length-based 'lod' is [[2, 3]]. Then the overall
+    shape of the LoDTensor would be [5, 1], holding 5 words for two sentences.

     Args:
-        data: a numpy array or a LoDTensor holding the data to be copied.
-        lod: a list of lists indicating the length-based LoD info specified by the user.
-        base_shape: the shape of the basic element to be held by the LoDTensor.
-        place: CPU or GPU place indicating where the data in the new LoDTensor will be stored.
-        low: the lower bound of the random integers.
-        high: the upper bound of the random integers.
+        lod(list): a list of lists indicating the length-based LoD info
+            specified by the user.
+        base_shape(list): the shape of the basic element to be held by the
+            LoDTensor.
+        place(Place): CPU or GPU place indicating where the data in the new
+            LoDTensor will be stored.
+        low(int): the lower bound of the random integers.
+        high(int): the upper bound of the random integers.

     Returns:
         A fluid LoDTensor object with tensor data and lod info.
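The two-sentence example in the docstring translates directly into code. A sketch, assuming both helpers are re-exported at the package root, as their presence in :code:`__all__` suggests:

    import numpy
    import paddle.fluid as fluid

    # Two sentences of 2 and 3 words; each word is a single integer id, so
    # the flattened data has shape (5, 1) and the length-based lod is [[2, 3]].
    data = numpy.arange(5).reshape(5, 1).astype('int64')
    tensor = fluid.create_lod_tensor(data, [[2, 3]], fluid.CPUPlace())

    # The same layout, filled with random word ids in [0, 9].
    random_tensor = fluid.create_random_int_lodtensor(
        [[2, 3]], base_shape=[1], place=fluid.CPUPlace(), low=0, high=9)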
python/paddle/fluid/recordio_writer.py

@@ -36,6 +36,45 @@ def convert_reader_to_recordio_file(
         compressor=core.RecordIOWriter.Compressor.Snappy,
         max_num_records=1000,
         feed_order=None):
+    """
+    Convert a Python Reader to a recordio file.
+
+    Please see :ref:`api_guide_python_reader` and :ref:`api_guide_reader_op` for
+    details.
+
+    Examples:
+
+        >>> import paddle.fluid as fluid
+        >>> import paddle.dataset.mnist as mnist
+        >>> import paddle
+        >>>
+        >>> tmp_program = fluid.Program()
+        >>> with fluid.program_guard(tmp_program):
+        >>>     img = fluid.layers.data(name='img', shape=[784])
+        >>>     label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+        >>> feeder = fluid.DataFeeder(feed_list=[img, label], place=fluid.CPUPlace())
+        >>> # mnist.recordio will be generated in current directory
+        >>> fluid.recordio_writer.convert_reader_to_recordio_file(
+        >>>     filename="mnist.recordio",
+        >>>     reader_creator=paddle.batch(mnist.train(), batch_size=32),
+        >>>     feeder=feeder)
+
+    Args:
+        filename(str): The recordio filename.
+        reader_creator(callable): The Python Reader Creator. See
+            :ref:`api_guide_python_reader`.
+        feeder(DataFeeder): The DataFeeder instance. Used to convert
+            :code:`reader_creator` to :code: `lod_tensor`
+        compressor: Must in fluid.core.RecordIOWriter.Compressor.Snappy or
+            fluid.core.RecordIOWriter.Compressor.NoCompress. Use :code:`Snappy`
+            by default.
+        max_num_records(int): Maximum number of records in one chuck. Each record
+            is each return value from reader function
+        feed_order(list): The order of variable names that the reader returns
+
+    Returns:
+        int: the number of record that saved.
+    """
     if feed_order is None:
         feed_order = feeder.feed_names
     counter = 0

@@ -58,6 +97,17 @@ def convert_reader_to_recordio_files(
         compressor=core.RecordIOWriter.Compressor.Snappy,
         max_num_records=1000,
         feed_order=None):
+    """
+    convert a python reader to many recordio files.
+
+    This API is basically same as :code:`convert_reader_to_recordio_file`,
+    instead of it will create many recordio files. Each file contains at
+    most :code:`batch_per_file` records.
+
+    Please reference
+    :ref:`api_fluid_recordio_writer_convert_reader_to_recordio_file` for more
+    details.
+    """
     if feed_order is None:
         feed_order = feeder.feed_names
     f_name, f_ext = os.path.splitext(filename)
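The docstring of :code:`convert_reader_to_recordio_files` mentions a :code:`batch_per_file` limit whose parameter is not visible in these hunks. A hedged sketch of the multi-file variant, mirroring the single-file example above; the :code:`batch_per_file` argument name is inferred from the docstring, not confirmed by the diff:

    import paddle
    import paddle.dataset.mnist as mnist
    import paddle.fluid as fluid

    tmp_program = fluid.Program()
    with fluid.program_guard(tmp_program):
        img = fluid.layers.data(name='img', shape=[784])
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    feeder = fluid.DataFeeder(feed_list=[img, label], place=fluid.CPUPlace())

    # Writes several files with names derived from "mnist.recordio" (the
    # function splits the extension via os.path.splitext), each holding at
    # most batch_per_file records.
    fluid.recordio_writer.convert_reader_to_recordio_files(
        filename="mnist.recordio",
        batch_per_file=256,  # assumed parameter name; see docstring above
        reader_creator=paddle.batch(mnist.train(), batch_size=32),
        feeder=feeder)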
python/paddle/fluid/trainer.py

@@ -33,23 +33,59 @@ __all__ = [

 class BeginEpochEvent(object):
+    """
+    The begin of a training epoch.
+
+    Args:
+        epoch_id(int): The current epoch ID.
+    """
+
     def __init__(self, epoch_id):
         self.epoch = epoch_id


 class EndEpochEvent(object):
+    """
+    The end of a training epoch.
+
+    Args:
+        epoch_id(int): The current epoch ID.
+    """
+
     def __init__(self, epoch_id):
         self.epoch = epoch_id


 class BeginStepEvent(object):
+    """
+    The begin of a training epoch.
+
+    Args:
+        epoch_id(int): The current epoch ID.
+        step_id(int): The current step ID.
+    """
+
     def __init__(self, epoch_id, step_id):
         self.epoch = epoch_id
         self.step = step_id
         self.fetch_metrics = True
+        """
+        If fetch_metrics is true, the metrics will be fetched at the
+        EndStepEvent. Default is True.
+        """


 class EndStepEvent(object):
+    """
+    The end of a training step.
+
+    Args:
+        epoch_id(int): The current epoch ID.
+        step_id(int): The current step ID.
+        metrics(list): A list of fetched tensor. The order of this list is same
+            as the :code:`train_func` returns.
+    """
+
     def __init__(self, epoch_id, step_id, metrics):
         self.epoch = epoch_id
         self.step = step_id

@@ -57,6 +93,27 @@ class EndStepEvent(object):

 class CheckpointConfig(object):
+    """
+    Parameter object for :code:`fluid.io.save_checkpoint` and
+    :code:`fluid.Trainer`. Used to configuration how to save checkpoint.
+
+    Args:
+        checkpoint_dir(str): Directory path to save check point. Default is the
+            current directory.
+        max_num_checkpoints(int): The max number of local check points.
+        epoch_interval(int): Every number of epoch to save check point.
+        step_interval(int): Every number of step to save check point.
+
+    Examples:
+        >>> config = fluid.CheckpointConfig("./checkpoints")
+        >>> trainer = fluid.Trainer(train_func=train_program,
+        >>>                         place=place,
+        >>>                         optimizer_func=optimizer_func,
+        >>>                         checkpoint_config=config)
+        >>> trainer.train(...)
+    """
+
     def __init__(self,
                  checkpoint_dir=None,
                  max_num_checkpoints=3,

@@ -113,11 +170,62 @@ def check_and_get_place(place):

 class Trainer(object):
     """
+    A trainer wraps MultiGPU/MultiNode training loops and can be used to train a
+    simple neural network easily.
+
+    This API takes a :code:`train_func`. A :code:`train_func` is a function that
+    return loss as it first return value. The reset value can be fetched by
+    EndStepEvent.metrics
+
+    This API also takes a :code:`optimizer_func` that will return an optimizer
+    instance.
+
+    For example, to train a MLP for MNIST dataset, the sample program is
+
+    >>> import paddle.fluid as fluid
+    >>>
+    >>> def mlp(image, layer_sizes=[200, 100], activation="relu", num_classes=10):
+    >>>     hidden = image
+    >>>     for layer_size in layer_sizes:
+    >>>         hidden = fluid.layers.fc(input=hidden, size=layer_size, act=activation)
+    >>>     return fluid.layers.fc(input=hidden, size=num_classes, act="softmax")
+    >>>
+    >>> def train_mnist_mlp():
+    >>>     img = fluid.layers.data(name='image', shape=[784])
+    >>>     label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    >>>     prediction = mlp(img)
+    >>>     return fluid.layers.mean(fluid.layers.cross_entropy(prediction, label))
+    >>>
+    >>> def optimizer():
+    >>>     return fluid.optimizer.Adam()
+    >>>
+    >>> trainer = Trainer(train_func=train_mnist_mlp,
+    >>>                   optimizer_func=optimizer,
+    >>>                   place=fluid.CUDAPlace(0),
+    >>>                   parallel=True)
+    >>>
+    >>> def train_callback(event):
+    >>>     if isinstance(event, fluid.EndStepEvent):
+    >>>         print "Epoch ID", event.epoch, "Step ID", \
+    >>>             event.step, "AvgLoss", event.metrics[0]
+    >>>     elif isinstance(event, fluid.EndEpochEvent):
+    >>>         trainer.save_params("./model_{0}".format(event.epoch))
+    >>>
+    >>> trainer.train(num_epochs=100, event_handler=train_callback)
+
+    For more example, please see :ref:`api_guide_high_level_api`.
+
     Args:
-        train_func(callable): A function which will return loss. The loss must be a scalar.
+        train_func(callable): A function which will return loss. The loss must be
+            a scalar tensor.
         optimizer_func(callable): A function that returns an Optimizer object.
-        place: The device place of this trainer.
+        place(CUDAPlace|CPUPlace): The device place of this trainer. If
+            :code:`parallel=True,` all CUDA Places will be used if :code:`place`
+            is a :code:`CUDAPlace`.
+        parallel(bool): True if use multiple devices.
+        checkpoint_config(CheckpointConfig): Configuration about how to save
+            checkpoints.
     """

     def __init__(self,

@@ -129,9 +237,6 @@ class Trainer(object):
                  checkpoint_config=None):
         self.__stop = False
         self.parallel = parallel
-        # 1. we need to generate a framework.Program by calling
-        # program_func. Reference: fluid.program_guard in
-        # test_word2vec.py

         # config for checkpoint
         # only chief worker will save variables

@@ -145,6 +250,10 @@ class Trainer(object):
         self.scope = core.Scope()

+        # 1. we need to generate a framework.Program by calling
+        # program_func. Reference: fluid.program_guard in
+        # test_word2vec.py
+
         self.startup_program = framework.Program()
         self.train_program = framework.Program()

@@ -277,17 +386,18 @@ class Trainer(object):
     def train(self, num_epochs, event_handler, reader=None, feed_order=None):
         """
-        Train the model.
+        Start the train loop to train the model.

         Args:
-            num_epochs: The number of epoch. An epoch will process all data in reader
-            event_handler: The event handler. A function with type (ev:Event)->void
-            reader:
-            feed_order: Feeding order of reader. None will following the defining
-                order in program
+            num_epochs(int): The number of epoch. An epoch will process all data in reader
+            event_handler(callable): The event handler. A function with type (ev:Event)->void
+            reader(callable): A reader creator object. See also
+                :ref:`api_guide_python_reader` .
+            feed_order(list): Feeding order of reader. None will following the defining
+                order in program

         Returns:
             None
         """
         training_role = os.getenv("PADDLE_TRAINING_ROLE", "")
         if training_role == "PSERVER":

@@ -307,16 +417,24 @@ class Trainer(object):
         Test the model on given test data

         Args:
-            reader: The reader that yields test data.
-            feed_order: Feeding order of reader. None will following the defining
-                order in program
+            reader(callable): The reader that yields test data.
+            feed_order(list): Feeding order of reader. None will following the
+                defining order in program
         """
         return self._test_by_executor(reader, feed_order,
                                       self.train_func_outputs)

     def save_params(self, param_path):
-        # reference: save_persistables in io.py
+        """
+        Save all parameters into :code:`param_path`.
+
+        Args:
+            param_path(str): The path to save parameters.
+
+        Returns:
+            None
+        """
         with self._prog_and_scope_guard():
             exe = executor.Executor(self.place)
             io.save_persistables(exe, dirname=param_path)
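The new :code:`BeginStepEvent.fetch_metrics` flag lets an event handler skip metric fetching on most steps. A sketch of a handler built only from names that appear in this diff:

    import paddle.fluid as fluid

    def event_handler(event):
        if isinstance(event, fluid.BeginStepEvent):
            # Only fetch metrics every 10th step; when False, the matching
            # EndStepEvent arrives without fetched metric values.
            event.fetch_metrics = event.step % 10 == 0
        elif isinstance(event, fluid.EndStepEvent):
            if event.metrics:
                print "Epoch", event.epoch, "Step", event.step, \
                    "metrics", event.metrics

    # trainer.train(num_epochs=100, event_handler=event_handler)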
python/paddle/fluid/transpiler/memory_optimization_transpiler.py

@@ -383,6 +383,16 @@ def memory_optimize(input_program, skip_opt_set=None, print_log=False, level=0):

 def release_memory(input_program, skip_opt_set=None):
+    """
+    Modify the input program and insert :code:`delete_op` to early drop not used
+    variables. The modification will be performed inplace.
+
+    Notes: This is an experimental API and could be removed in next few
+    releases. Users should not use this API.
+
+    Args:
+        input_program(Program): The program will be inserted :code:`delete_op`.
+    """
     cfgs = _get_cfgs(input_program)
     for cfg in cfgs:
         cfg.release_memory(skip_opt_set=skip_opt_set)
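Since :code:`release_memory` modifies the program in place, it should be called only after the whole program, including backward and optimize ops, has been built. A minimal sketch; note the docstring itself flags the API as experimental:

    import paddle.fluid as fluid

    # ... build default_main_program and append backward/optimize ops first ...

    # Inserts delete_op so intermediate variables are dropped as soon as they
    # are no longer referenced. Modifies the program in place; returns nothing.
    fluid.release_memory(fluid.default_main_program())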
python/paddle/fluid/unique_name.py

@@ -16,7 +16,7 @@ import collections
 import contextlib
 import sys

-__all__ = ['generate', 'switch', 'guard', 'UniqueNameGenerator']
+__all__ = ['generate', 'switch', 'guard']


 class UniqueNameGenerator(object):
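Dropping :code:`UniqueNameGenerator` from :code:`__all__` leaves :code:`generate`, :code:`switch`, and :code:`guard` as the public surface. A sketch of how :code:`guard` resets the name counter, which is what makes the two-program pattern in the :code:`clone` docstring produce identical variable names (the exact generated names, e.g. "fc_0", are an assumption about the counter's starting value):

    import paddle.fluid as fluid

    with fluid.unique_name.guard():
        name_a = fluid.unique_name.generate("fc")  # e.g. "fc_0"
    with fluid.unique_name.guard():
        name_b = fluid.unique_name.generate("fc")  # counter restarts in a new guard
    assert name_a == name_b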