From f372f27e3f724a55ed221f18d3c30b18b4cde8cf Mon Sep 17 00:00:00 2001
From: qingqing01
Date: Mon, 30 Jul 2018 15:36:15 +0800
Subject: [PATCH] Hidden APIs for While, StaticRNN, ParallelDo. (#12332)

* Hidden APIs for While, StaticRNN, ParallelDo.
---
 paddle/fluid/API.spec                      |  5 +----
 python/paddle/fluid/layers/control_flow.py | 22 +++++++++++++---------
 2 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 6efb03dabe..5f3bfa2965 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -170,6 +170,7 @@ paddle.fluid.layers.mean_iou ArgSpec(args=['input', 'label', 'num_classes'], var
 paddle.fluid.layers.relu ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.log ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.crop ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None))
+paddle.fluid.layers.rank_loss ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_recordio_file ArgSpec(args=['filename', 'shapes', 'lod_levels', 'dtypes', 'pass_num', 'for_parallel'], varargs=None, keywords=None, defaults=(1, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
@@ -201,7 +202,6 @@ paddle.fluid.layers.zeros ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=
 paddle.fluid.layers.reverse ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.While.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.While.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.While.complete ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Switch.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.Switch.case ArgSpec(args=['self', 'condition'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Switch.default ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
@@ -225,17 +225,14 @@ paddle.fluid.layers.DynamicRNN.static_input ArgSpec(args=['self', 'x'], varargs=
 paddle.fluid.layers.DynamicRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.update_memory ArgSpec(args=['self', 'ex_mem', 'new_mem'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.StaticRNN.complete_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.memory ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1))
 paddle.fluid.layers.StaticRNN.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
-paddle.fluid.layers.StaticRNN.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.step ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.step_output ArgSpec(args=['self', 'o'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.update_memory ArgSpec(args=['self', 'mem', 'var'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.reorder_lod_tensor_by_rank ArgSpec(args=['x', 'rank_table'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.__init__ ArgSpec(args=['self', 'places', 'use_nccl', 'name'], varargs=None, keywords=None, defaults=(False, None))
-paddle.fluid.layers.ParallelDo.complete_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.do ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.get_parameters ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index f05ae6d5d1..3ee1c636ac 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -21,6 +21,7 @@ from ..layer_helper import LayerHelper, unique_name
 from ..initializer import force_init_on_cpu
 from ops import logical_and, logical_not, logical_or
 import numpy
+import warnings

 __all__ = [
     'While',
@@ -280,6 +281,9 @@ class ParallelDo(object):
     """

     def __init__(self, places, use_nccl=False, name=None):
+        warnings.warn(
+            "API ParallelDo is deprecated since 0.15.0. Please use ParallelExecutor instead.",
+            Warning)
         self.helper = LayerHelper("parallel_do", name=name)
         self.inputs = []
         self.places = places
@@ -338,7 +342,7 @@ class ParallelDo(object):

         return [parent_block.var(name) for name in params]

-    def complete_op(self):
+    def _complete_op(self):
         main_program = self.helper.main_program
         current_block = main_program.current_block()
         parent_block = self.parent_block()
@@ -394,7 +398,7 @@ class BlockGuardWithCompletion(BlockGuard):
         if exc_type is not None:
             return False
         self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
-        self.rnn.complete_op()
+        self.rnn._complete_op()
         return super(BlockGuardWithCompletion, self).__exit__(exc_type, exc_val,
                                                               exc_tb)

@@ -470,7 +474,7 @@ class StaticRNN(object):
             if shape is None or batch_ref is None:
                 raise ValueError(
                     "if init is None, memory at least need shape and batch_ref")
-            parent_block = self.parent_block()
+            parent_block = self._parent_block()
             var_name = unique_name.generate("@".join(
                 [self.helper.name, "memory_boot"]))
             boot_var = parent_block.create_var(
@@ -527,7 +531,7 @@ class StaticRNN(object):
             outputs={'Out': tmp_o},
             attrs={'dtype': o.dtype})

-        out_var = self.parent_block().create_var(
+        out_var = self._parent_block().create_var(
             name=tmp_o.name,
             shape=[self.seq_len] + list(tmp_o.shape),
             dtype=tmp_o.dtype)
@@ -543,7 +547,7 @@ class StaticRNN(object):
             raise TypeError("update memory should take variables")
         self.memories[mem.name].mem = var

-    def parent_block(self):
+    def _parent_block(self):
         prog = self.helper.main_program
         parent_idx = prog.current_block().parent_idx
         assert parent_idx >= 0
@@ -560,10 +564,10 @@ class StaticRNN(object):
         else:
             return self.outputs

-    def complete_op(self):
+    def _complete_op(self):
         main_program = self.helper.main_program
         rnn_block = main_program.current_block()
-        parent_block = self.parent_block()
+        parent_block = self._parent_block()

         local_inputs = set()

@@ -643,7 +647,7 @@ class WhileGuard(BlockGuard):
         if exc_type is not None:
             return False
         self.while_op.status = While.AFTER_WHILE_BLOCK
-        self.while_op.complete()
+        self.while_op._complete()
         return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)


@@ -690,7 +694,7 @@ class While(object):
     def block(self):
         return WhileGuard(self)

-    def complete(self):
+    def _complete(self):
         main_program = self.helper.main_program
         while_block = main_program.current_block()
         parent_block = main_program.block(main_program.current_block()
--
GitLab
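
Reviewer note: the renamed methods are still invoked automatically by their block
guards (WhileGuard.__exit__ now calls While._complete(), and
BlockGuardWithCompletion.__exit__ calls StaticRNN._complete_op()), so code that
drives these ops through the public context managers is unaffected. A minimal
sketch of the While usage that keeps working, assuming the Fluid layers API of
this release; the counter and limit variables are illustrative:

    import paddle.fluid as fluid

    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
    limit = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
    cond = fluid.layers.less_than(x=i, y=limit)

    while_op = fluid.layers.While(cond=cond)
    with while_op.block():  # WhileGuard calls the now-private _complete() on exit
        i = fluid.layers.increment(x=i, in_place=True)
        fluid.layers.less_than(x=i, y=limit, cond=cond)  # refresh loop condition

Only code that called while_op.complete() by hand would need to change.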
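
The same pattern holds for StaticRNN: complete_op and parent_block become
_complete_op and _parent_block, and rnn.step() completes the op on block exit.
A hedged sketch of the public surface, with made-up sequence length, batch size,
and hidden size:

    import paddle.fluid as fluid

    # Sequence-major input [seq_len, batch, dim]; name and sizes are illustrative.
    x = fluid.layers.data(name='x', shape=[10, 32, 16], dtype='float32',
                          append_batch_size=False)

    rnn = fluid.layers.StaticRNN()
    with rnn.step():  # BlockGuardWithCompletion calls _complete_op() on exit
        x_t = rnn.step_input(x)                           # one step, [batch, 16]
        prev = rnn.memory(shape=[-1, 20], batch_ref=x_t)  # zero-initialized state
        hidden = fluid.layers.fc(input=[x_t, prev], size=20, act='tanh')
        rnn.update_memory(prev, hidden)
        rnn.step_output(hidden)
    out = rnn()  # stacked step outputs, [seq_len, batch, 20]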
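
For ParallelDo, the new deprecation warning points users at ParallelExecutor. A
rough migration sketch, not part of this patch, assuming the user's program
already defines a loss variable (here named loss) and runs with CUDA available:

    import paddle.fluid as fluid

    # Build the network once in the default program (no ParallelDo block), then
    # let ParallelExecutor replicate it across all visible devices.
    exe = fluid.Executor(fluid.CUDAPlace(0))
    exe.run(fluid.default_startup_program())

    # 'loss' is an assumed variable from the user's own network definition.
    pexe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
    loss_vals = pexe.run(fetch_list=[loss.name])  # one fetched result per name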