diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 8baea326e599184bbc5cdea3a4771b4fafca9c23..1e321716697ea4f8628bc796e6fd6e4a262fb8f1 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -1,17 +1,10 @@
 paddle.fluid.Program.__init__ ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.block ArgSpec(args=['self', 'index'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.clone ArgSpec(args=['self', 'for_test'], varargs=None, keywords=None, defaults=(False,))
-paddle.fluid.Program.copy_data_info_from ArgSpec(args=['self', 'other'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.create_block ArgSpec(args=['self', 'parent_idx'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.Program.current_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.get_desc ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.global_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.inference_optimize ArgSpec(args=['self', 'export_for_deployment'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.Program.list_vars ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.optimized_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.Program.parse_from_string ArgSpec(args=['binary_str'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.prune ArgSpec(args=['self', 'targets'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.rollback ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.to_string ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.Operator.__init__ ArgSpec(args=['self', 'block', 'desc', 'type', 'inputs', 'outputs', 'attrs'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.Operator.all_attrs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index a415cdbeaaae2a3bb4a137744205e3fe7366a78f..88eaae10dd55edcc7e811163acf17579eb32cbf1 100644
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -347,7 +347,7 @@ def _append_backward_ops_(block,
         # If the op has its own sub-block, deal with the sub-block first
         if op.has_attr("sub_block"):
             sub_block = program.block(op.block_attr_id("sub_block"))
-            grad_sub_block = program.create_block()
+            grad_sub_block = program._create_block()
             grad_sub_block._set_forward_block_idx(sub_block.idx)
             cb = _callback_lookup_(op)
             if cb is not None:
@@ -361,7 +361,7 @@ def _append_backward_ops_(block,
                 _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block,
                                       no_grad_dict, grad_to_var, callbacks)
 
-            program.rollback()
+            program._rollback()
             grad_sub_block_list.append(grad_sub_block.desc)
 
         # Getting op's corresponding grad_op
diff --git a/python/paddle/fluid/clip.py b/python/paddle/fluid/clip.py
index ba7ba3b5e983bfbaa82fc752f4821e8a934dfb8c..79904cec93d1732f9f3f25115869c63385bd6276 100644
--- a/python/paddle/fluid/clip.py
+++ b/python/paddle/fluid/clip.py
@@ -331,7 +331,7 @@ def append_gradient_clip_ops(param_grads):
     for p, g in param_grads:
         if g is None:
             continue
-        with p.block.program.optimized_guard([p, g]):
+        with p.block.program._optimized_guard([p, g]):
             clip_attr = getattr(p, 'gradient_clip_attr', NullGradientClipAttr())
             if clip_attr is None:
                 clip_attr = NullGradientClipAttr()
@@ -346,7 +346,7 @@ def append_gradient_clip_ops(param_grads):
     for p, g in param_grads:
         if g is None:
             continue
-        with p.block.program.optimized_guard([p, g]):
+        with p.block.program._optimized_guard([p, g]):
             res.append(clip_attr._create_operators(param=p, grad=g))
 
     return res
diff --git a/python/paddle/fluid/concurrency.py b/python/paddle/fluid/concurrency.py
index b4a06f23a6f2713b665bdd42919925e4a0475a82..e375fdef9c6076f3268c86c0b79d9d484021e49d 100644
--- a/python/paddle/fluid/concurrency.py
+++ b/python/paddle/fluid/concurrency.py
@@ -126,7 +126,7 @@ class SelectCase(object):
         self.channel = channel
 
     def __enter__(self):
-        self.block = self.main_program.create_block()
+        self.block = self.main_program._create_block()
 
     def construct_op(self):
         main_program = self.helper.main_program
@@ -187,7 +187,7 @@ class SelectCase(object):
                 if self.value else '')
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.main_program.rollback()
+        self.main_program._rollback()
         if exc_type is not None:
             return False  # re-raise exception
         return True
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 8892606486ee97bb085e642e89fce872e5ba1f7e..c0c8e0d58b0d24c07d10f2cf014f545ebf2515f7 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -935,7 +935,7 @@ class Block(object):
 
     Notes:
         The constructor of Block should not be invoked directly. Please
-        use `Program.create_block()` to create a block.
+        use `Program._create_block()` to create a block.
 
     Examples:
         .. code-block:: python
@@ -1483,7 +1483,7 @@ class Program(object):
             self._op_role_var = [var_name]
 
     @contextlib.contextmanager
-    def optimized_guard(self, param_and_grads):
+    def _optimized_guard(self, param_and_grads):
         """
         A with guard to set :code:`Optimization` :code:`OpRole` and
         :code:`OpRoleVar` automatically.
@@ -1496,7 +1496,7 @@ class Program(object):
         Examples:
 
             >>> p, g = backward(...)
-            >>> with program.optimized_guard([p,g]):
+            >>> with program._optimized_guard([p,g]):
             >>>     p = p - 0.001 * g
         """
         OpRole = core.op_proto_and_checker_maker.OpRole
@@ -1554,7 +1554,7 @@ class Program(object):
         res_str = _debug_string_(proto, throw_on_error)
         return res_str
 
-    def get_desc(self):
+    def _get_desc(self):
         """
         Get the C++ side of `ProgramDesc` object pointer. The C++ object is
         exposed by :code:`pybind`.
@@ -1647,7 +1647,7 @@ class Program(object):
         The two code snippets above will generate same programs.
         """
         if for_test:
-            p = self.inference_optimize(export_for_deployment=False)
+            p = self._inference_optimize(export_for_deployment=False)
         else:
             p = Program()
             p.current_block_idx = self.current_block_idx
@@ -1663,10 +1663,10 @@ class Program(object):
         p._sync_with_cpp()
 
         p._copy_param_info_from(self)
-        p.copy_data_info_from(self)
+        p._copy_data_info_from(self)
         return p
 
-    def prune(self, targets):
+    def _prune(self, targets):
         """
         Prune operators and variables which are not needed to generate
         :code:`targets`.
@@ -1717,7 +1717,7 @@ class Program(object):
         res._sync_with_cpp()
         return res
 
-    def inference_optimize(self, export_for_deployment=True):
+    def _inference_optimize(self, export_for_deployment=True):
         """
         This method will create a new program and do following adjustments on it:
         1. Remove all reader variables and their creator ops if exist.
@@ -1841,7 +1841,7 @@ class Program(object):
         """
         return self.blocks[self.current_block_idx]
 
-    def create_block(self, parent_idx=None):
+    def _create_block(self, parent_idx=None):
         """
         Create a new block with the :code:`parent_idx` and change the current
         block to new block.
@@ -1860,7 +1860,7 @@ class Program(object):
         self.blocks.append(Block(self, self.current_block_idx))
         return self.current_block()
 
-    def rollback(self):
+    def _rollback(self):
         """
         Exit a code block, i.e., roll back to the parent block.
         Returns:
@@ -1906,7 +1906,7 @@ class Program(object):
                              "program, with represent the same topology")
         self.global_block()._copy_param_info_from(other.global_block())
 
-    def copy_data_info_from(self, other):
+    def _copy_data_info_from(self, other):
         """
         Copy the information of data variables from other program.
 
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py
index 656fafa0cb54d70e0eba8ec2bef21488c50d8d94..af653970418275548f0810afcf1dae173d9cb171 100644
--- a/python/paddle/fluid/io.py
+++ b/python/paddle/fluid/io.py
@@ -515,8 +515,8 @@ def get_inference_program(target_vars, main_program=None):
             vars.extend(var.metrics)
         else:
             vars.append(var)
-    pruned_program = main_program.prune(targets=vars)
-    inference_program = pruned_program.inference_optimize()
+    pruned_program = main_program._prune(targets=vars)
+    inference_program = pruned_program._inference_optimize()
     return inference_program
 
 
@@ -644,8 +644,8 @@ def save_inference_model(dirname,
             global_block._remove_op(i)
     copy_program.desc.flush()
 
-    pruned_program = copy_program.prune(targets=target_vars)
-    inference_program = pruned_program.inference_optimize(
+    pruned_program = copy_program._prune(targets=target_vars)
+    inference_program = pruned_program._inference_optimize(
         export_for_deployment=export_for_deployment)
 
     fetch_var_names = [v.name for v in target_vars]
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index d7cee0429591f7831fbb4c06a9e3f0d8478a783f..0049773bbeb514d5dfef490e73b9988bd5371029 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -217,10 +217,10 @@ class BlockGuard(object):
         self.main_program = main_program
 
     def __enter__(self):
-        self.main_program.create_block()
+        self.main_program._create_block()
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.main_program.rollback()
+        self.main_program._rollback()
         if exc_type is not None:
             return False  # re-raise exception
         return True
diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py
index 0cf7aaef4ab75ca6976465d1b404004a9f2f64c5..0881273c7a715c933e5c476b7341e8ce6941270e 100644
--- a/python/paddle/fluid/layers/io.py
+++ b/python/paddle/fluid/layers/io.py
@@ -1008,9 +1008,9 @@ class Preprocessor(object):
     @contextlib.contextmanager
     def block(self):
         self.status = Preprocessor.IN_SUB_BLOCK
-        self.sub_block = self.main_prog.create_block()
+        self.sub_block = self.main_prog._create_block()
         yield
-        self.main_prog.rollback()
+        self.main_prog._rollback()
         self.status = Preprocessor.AFTER_SUB_BLOCK
         if not self._is_completed():
             raise RuntimeError(
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 215f0cf2fc5ab4fbd06719ac4790a01dd00080eb..ef7b16a19e10a28bd1cc34496fb908580c5d7330 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -236,7 +236,7 @@ class Optimizer(object):
         for param_and_grad in parameters_and_grads:
             if param_and_grad[1] is None:
                 continue
-            with param_and_grad[0].block.program.optimized_guard(
+            with param_and_grad[0].block.program._optimized_guard(
                     param_and_grad), name_scope("optimizer"):
                 if param_and_grad[0].trainable is True:
                     optimize_op = self._append_optimize_op(loss.block,
@@ -580,7 +580,7 @@ class AdamOptimizer(Optimizer):
         for param, grad in param_and_grads:
             if grad is None:
                 continue
-            with param.block.program.optimized_guard([param, grad]):
+            with param.block.program._optimized_guard([param, grad]):
                 beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                                       param)
                 beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
@@ -709,7 +709,7 @@ class AdamaxOptimizer(Optimizer):
         for param, grad in parameters_and_grads:
             if grad is None:
                 continue
-            with param.block.program.optimized_guard([param, grad]):
+            with param.block.program._optimized_guard([param, grad]):
                 beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                                       param)
                 main_block.append_op(
@@ -1198,7 +1198,7 @@ class ModelAverage(Optimizer):
         for param, grad in self.params_grads:
             if grad is None:
                 continue
-            with param.block.program.optimized_guard([param, grad]):
+            with param.block.program._optimized_guard([param, grad]):
                 self._append_average_accumulate_op(param)
 
         self.apply_program = Program()
diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py
index da38626111a6767e1a76a35d6d1375ccc1283de4..8f4678649f2146c84150ad2659e497bbf0365d03 100644
--- a/python/paddle/fluid/regularizer.py
+++ b/python/paddle/fluid/regularizer.py
@@ -47,7 +47,7 @@ def append_regularization_ops(parameters_and_grads, regularization=None):
         if grad is None:
             params_and_grads.append((param, grad))
             continue
-        with param.block.program.optimized_guard([param, grad]):
+        with param.block.program._optimized_guard([param, grad]):
             regularization_term = None
             if param.regularizer is not None:
                 # Add variable for regularization term in grad block
diff --git a/python/paddle/fluid/tests/unittests/test_operator_desc.py b/python/paddle/fluid/tests/unittests/test_operator_desc.py
index cac132e6e08a8a9ec595236b1a990c0900ea4f0f..4153394c1da776d0a41e1415a09fa7d6f4b14d6d 100644
--- a/python/paddle/fluid/tests/unittests/test_operator_desc.py
+++ b/python/paddle/fluid/tests/unittests/test_operator_desc.py
@@ -26,7 +26,7 @@ main_program = default_startup_program()
 
 class TestOperator(unittest.TestCase):
     def test_error_type(self):
-        block = main_program.create_block()
+        block = main_program._create_block()
         try:
             block.append_op()
             self.assertFail()
diff --git a/python/paddle/fluid/tests/unittests/test_program.py b/python/paddle/fluid/tests/unittests/test_program.py
index 0997afc97a97333c914a3027103ec48733b410dc..0b9fba5fe376474b084fd233ace41c9c0cd53547 100644
--- a/python/paddle/fluid/tests/unittests/test_program.py
+++ b/python/paddle/fluid/tests/unittests/test_program.py
@@ -28,25 +28,25 @@ class TestProgram(unittest.TestCase):
         self.assertEqual(-1, b.parent_idx)
         self.assertEqual(0, b.idx)
 
-        b = main_program.create_block()
+        b = main_program._create_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)
 
-        b = main_program.create_block()
+        b = main_program._create_block()
         self.assertEqual(2, b.idx)
         self.assertEqual(1, b.parent_idx)
 
-        main_program.rollback()
+        main_program._rollback()
         b = main_program.current_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)
 
-        b = main_program.create_block()
+        b = main_program._create_block()
         self.assertEqual(3, b.idx)
         self.assertEqual(1, b.parent_idx)
 
-        main_program.rollback()
+        main_program._rollback()
         b = main_program.current_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)
 
@@ -120,8 +120,8 @@ class TestProgram(unittest.TestCase):
         main_program = fluid.Program()
         with fluid.program_guard(main_program, startup_program):
             net()
-        no_read_program = main_program.inference_optimize()
-        keep_read_program = main_program.inference_optimize(
+        no_read_program = main_program._inference_optimize()
+        keep_read_program = main_program._inference_optimize(
             export_for_deployment=False)
         no_read_ops = no_read_program.global_block().ops
         keep_read_ops = keep_read_program.global_block().ops
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index e070ea8d428831c490348fedbf1f8865fdb9910c..f58f1883a407a3123856e19b5ec8fc01862466a7 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -580,7 +580,7 @@ class DistributeTranspiler(object):
             assert isinstance(origin_block, Block)
             # we put the new sub block to new block to follow the block
            # hierarchy of the original blocks
-            new_sub_block = program.create_block(lr_block.idx)
+            new_sub_block = program._create_block(lr_block.idx)
 
             # clone vars
             for var in origin_block.vars:
@@ -600,7 +600,7 @@ class DistributeTranspiler(object):
         # record optimize blocks and we can run them on pserver parallel
         optimize_blocks = []
         if len(lr_ops) > 0:
-            lr_decay_block = pserver_program.create_block(
+            lr_decay_block = pserver_program._create_block(
                 pserver_program.num_blocks - 1)
             optimize_blocks.append(lr_decay_block)
             for _, op in enumerate(lr_ops):
@@ -613,7 +613,7 @@ class DistributeTranspiler(object):
         grad_to_block_id = []
         pre_block_idx = pserver_program.num_blocks - 1
         for idx, opt_op in enumerate(opt_op_on_pserver):
-            per_opt_block = pserver_program.create_block(pre_block_idx)
+            per_opt_block = pserver_program._create_block(pre_block_idx)
             optimize_blocks.append(per_opt_block)
             # append grad merging ops before clip and weight decay
             # cases may like:
@@ -636,7 +636,7 @@ class DistributeTranspiler(object):
         grad_to_block_id = list(set(grad_to_block_id))
         # append global ops
         if global_ops:
-            opt_state_block = pserver_program.create_block(
+            opt_state_block = pserver_program._create_block(
                 pserver_program.num_blocks - 1)
             optimize_blocks.append(opt_state_block)
             for glb_op in global_ops:
@@ -1073,7 +1073,7 @@ class DistributeTranspiler(object):
         table_var = pserver_program.global_block().vars[self.table_name]
         prefetch_var_name_to_block_id = []
         for index in range(len(self.all_prefetch_input_vars)):
-            prefetch_block = pserver_program.create_block(optimize_block.idx)
+            prefetch_block = pserver_program._create_block(optimize_block.idx)
             trainer_ids = self.all_prefetch_input_vars[index][pserver_index]
             pserver_ids = pserver_program.global_block().create_var(
                 name=trainer_ids.name,
@@ -1131,7 +1131,7 @@ class DistributeTranspiler(object):
             if 'Param' in op.input_names and
            op.input("Param")[0] == self.table_name
         ][0]
-        table_opt_block = pserver_program.create_block(pre_block_idx)
+        table_opt_block = pserver_program._create_block(pre_block_idx)
 
         if self.sync_mode:
             # create grad vars in pserver program
@@ -1194,7 +1194,7 @@ class DistributeTranspiler(object):
             persistable=True,
             type=core.VarDesc.VarType.RAW)
 
-        checkpoint_save_block = pserver_program.create_block(pre_block_idx)
+        checkpoint_save_block = pserver_program._create_block(pre_block_idx)
         # this 'file_path' do not be used in save lookup table variable
         checkpoint_save_block.append_op(
             type='save',
diff --git a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
index 3e58e125de4188144646236f7999c620cd8e9459..21607f9ab7a8e6ca6f250e1959445da1b7d9975f 100644
--- a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
+++ b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
@@ -357,7 +357,7 @@ def _get_cfgs(input_program):
     :return: A list of ControlFlowGraph, each corresponds to a block.
     """
     ops_list = []
-    pdesc = input_program.get_desc()
+    pdesc = input_program._get_desc()
     block_desc = pdesc.block(0)
     op_size = block_desc.op_size()
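
After this rename, the block push/pop helpers are only reachable under their underscore-prefixed names. The snippet below is an illustrative sketch, not part of the patch; it mirrors the behavior asserted by test_program.py above, assuming a paddle.fluid build at this revision.

# Illustrative sketch only (not part of the patch): exercises the renamed
# private helpers the same way test_program.py does.
import paddle.fluid as fluid

prog = fluid.Program()
b0 = prog.current_block()     # the global block, idx 0
b1 = prog._create_block()     # push a new sub-block; it becomes the current block
assert b1.parent_idx == b0.idx
prog._rollback()              # pop back to the parent block
assert prog.current_block().idx == b0.idx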