Commit ce4eba3b authored by minqiyang

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into port_python3_syntax

@@ -170,6 +170,7 @@ paddle.fluid.layers.mean_iou ArgSpec(args=['input', 'label', 'num_classes'], var
 paddle.fluid.layers.relu ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.log ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.crop ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None))
+paddle.fluid.layers.rank_loss ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_recordio_file ArgSpec(args=['filename', 'shapes', 'lod_levels', 'dtypes', 'pass_num', 'for_parallel'], varargs=None, keywords=None, defaults=(1, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
@@ -201,7 +202,6 @@ paddle.fluid.layers.zeros ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=
 paddle.fluid.layers.reverse ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.While.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.While.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.While.complete ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Switch.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.Switch.case ArgSpec(args=['self', 'condition'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Switch.default ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
@@ -225,17 +225,14 @@ paddle.fluid.layers.DynamicRNN.static_input ArgSpec(args=['self', 'x'], varargs=
 paddle.fluid.layers.DynamicRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.update_memory ArgSpec(args=['self', 'ex_mem', 'new_mem'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.StaticRNN.complete_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.memory ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1))
 paddle.fluid.layers.StaticRNN.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
-paddle.fluid.layers.StaticRNN.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.step ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.step_output ArgSpec(args=['self', 'o'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.update_memory ArgSpec(args=['self', 'mem', 'var'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.reorder_lod_tensor_by_rank ArgSpec(args=['x', 'rank_table'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.__init__ ArgSpec(args=['self', 'places', 'use_nccl', 'name'], varargs=None, keywords=None, defaults=(False, None))
-paddle.fluid.layers.ParallelDo.complete_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.do ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.get_parameters ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
...
@@ -21,6 +21,7 @@ from ..layer_helper import LayerHelper, unique_name
 from ..initializer import force_init_on_cpu
 from .ops import logical_and, logical_not, logical_or
 import numpy
+import warnings
 from functools import reduce

 __all__ = [
@@ -276,11 +277,14 @@ class ParallelDo(object):
             avg_cost = fluid.layers.mean(x=cost)

     .. warning::

        It will be soon deprecated, please use ParallelExecutor instead.
     """

     def __init__(self, places, use_nccl=False, name=None):
+        warnings.warn(
+            "API ParallelDo is deprecated since 0.15.0. Please use ParallelExecutor instead.",
+            Warning)
         self.helper = LayerHelper("parallel_do", name=name)
         self.inputs = []
         self.places = places
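
The hunk above wires a runtime deprecation notice into ParallelDo's constructor using Python's standard `warnings` module. A minimal, self-contained sketch of the same pattern; the `ParallelDoLike` class here is a hypothetical stand-in, not Paddle code:

    import warnings

    class ParallelDoLike(object):
        # Hypothetical stand-in for ParallelDo, illustrating the pattern only.
        def __init__(self):
            warnings.warn(
                "API ParallelDo is deprecated since 0.15.0. "
                "Please use ParallelExecutor instead.",
                Warning)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")      # make sure the warning is recorded
        ParallelDoLike()
        assert "deprecated" in str(caught[0].message)

Passing the base `Warning` class as the category, rather than the more conventional `DeprecationWarning`, is likely deliberate: Python silences `DeprecationWarning` by default in ordinary runs, so the plain `Warning` category keeps the message visible to users.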
@@ -339,7 +343,7 @@ class ParallelDo(object):
         return [parent_block.var(name) for name in params]

-    def complete_op(self):
+    def _complete_op(self):
         main_program = self.helper.main_program
         current_block = main_program.current_block()
         parent_block = self.parent_block()
@@ -395,7 +399,7 @@ class BlockGuardWithCompletion(BlockGuard):
         if exc_type is not None:
             return False
         self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
-        self.rnn.complete_op()
+        self.rnn._complete_op()
         return super(BlockGuardWithCompletion, self).__exit__(exc_type, exc_val,
                                                               exc_tb)
@@ -471,7 +475,7 @@ class StaticRNN(object):
             if shape is None or batch_ref is None:
                 raise ValueError(
                     "if init is None, memory at least need shape and batch_ref")
-            parent_block = self.parent_block()
+            parent_block = self._parent_block()
             var_name = unique_name.generate("@".join(
                 [self.helper.name, "memory_boot"]))
             boot_var = parent_block.create_var(
@@ -528,7 +532,7 @@ class StaticRNN(object):
             outputs={'Out': tmp_o},
             attrs={'dtype': o.dtype})

-        out_var = self.parent_block().create_var(
+        out_var = self._parent_block().create_var(
             name=tmp_o.name,
             shape=[self.seq_len] + list(tmp_o.shape),
             dtype=tmp_o.dtype)
@@ -544,7 +548,7 @@ class StaticRNN(object):
             raise TypeError("update memory should take variables")
         self.memories[mem.name].mem = var

-    def parent_block(self):
+    def _parent_block(self):
         prog = self.helper.main_program
         parent_idx = prog.current_block().parent_idx
         assert parent_idx >= 0
@@ -561,10 +565,10 @@ class StaticRNN(object):
         else:
             return self.outputs

-    def complete_op(self):
+    def _complete_op(self):
         main_program = self.helper.main_program
         rnn_block = main_program.current_block()
-        parent_block = self.parent_block()
+        parent_block = self._parent_block()

         local_inputs = set()
@@ -644,7 +648,7 @@ class WhileGuard(BlockGuard):
         if exc_type is not None:
             return False
         self.while_op.status = While.AFTER_WHILE_BLOCK
-        self.while_op.complete()
+        self.while_op._complete()
         return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
@@ -691,7 +695,7 @@ class While(object):
     def block(self):
         return WhileGuard(self)

-    def complete(self):
+    def _complete(self):
         main_program = self.helper.main_program
         while_block = main_program.current_block()
         parent_block = main_program.block(main_program.current_block()
@@ -816,21 +820,21 @@ def max_sequence_len(rank_table):
 def lod_tensor_to_array(x, table):
     """
     Convert a LoDTensor to a LoDTensorArray.

     This function splits a LoDTensor into a LoDTensorArray according to its LoD
     information. LoDTensorArray is an alias of C++ std::vector<LoDTensor> in
     PaddlePaddle. The generated LoDTensorArray of this function can be further read
     or written by `read_from_array()` and `write_to_array()` operators. However,
     this function is generally an internal component of PaddlePaddle `DynamicRNN`.
     Users should not use it directly.

     Args:
         x (Variable|list): The LoDTensor to be converted to a LoDTensorArray.
         table (ParamAttr|list): The variable that stores the level of lod
                                 which is ordered by sequence length in
                                 descending order. It is generally generated
                                 by `layers.lod_rank_table()` API.

     Returns:
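
Although the docstring marks `lod_tensor_to_array` as internal to `DynamicRNN`, a short sketch makes the data flow concrete. This is an illustrative guess at era-appropriate usage, not an official example; the variable names are invented:

    import paddle.fluid as fluid

    # Variable-length sequences (lod_level=1) entering the network.
    x = fluid.layers.data(name='x', shape=[10], dtype='float32', lod_level=1)
    # Rank table that orders the sequences by length, descending.
    table = fluid.layers.lod_rank_table(x, level=0)
    # Split x into a LoDTensorArray following that order.
    array = fluid.layers.lod_tensor_to_array(x, table)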
@@ -1064,9 +1068,9 @@ def array_read(array, i):
     Given:

         array = [0.6, 0.1, 0.3, 0.1]

     And:

         i = 2

     Then:
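
A sketch matching the Given/And/Then example above, assuming the `create_array` and `fill_constant` layers available in this fluid release (names are illustrative):

    import paddle.fluid as fluid

    arr = fluid.layers.create_array(dtype='float32')   # a LoDTensorArray variable
    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=2)
    # Builds the read op; at run time it yields array[2] (0.3 in the example
    # above), assuming the array was filled first, e.g. via array_write.
    item = fluid.layers.array_read(array=arr, i=i)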
@@ -1173,9 +1177,9 @@ def array_length(array):
 class ConditionalBlockGuard(BlockGuard):
     """
     ConditionalBlockGuard is derived from BlockGuard. It is dedicated to
     holding a ConditionalBlock, and helping users enter and exit the
     ConditionalBlock via Python's 'with' keyword. However, ConditionalBlockGuard
     is generally an internal component of IfElse; users should not use it directly.
     """
@@ -1929,7 +1933,7 @@ def is_empty(x, cond=None, **ignored):
     Args:
         x (Variable): The Variable to be tested.
         cond (Variable|None): Output parameter. Returns the test result
                               of given 'x'. Default: None

     Returns:
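
A minimal sketch of both calling conventions that the signature in the hunk header supports (the variable names are illustrative):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    res = fluid.layers.is_empty(x=x)     # result Variable created and returned
    # Or reuse an existing Variable through the output parameter:
    # fluid.layers.is_empty(x=x, cond=res)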
...