Unverified commit efafc72f, authored by Wu Yi, committed by GitHub

Hide program APIs (#12315)

* hide program APIs

* fix merge error

* update
Parent commit: c9e5c1e4
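
The change is purely a rename of Program's internal helpers from public to underscore-prefixed names: create_block, rollback, prune, inference_optimize, optimized_guard, get_desc and copy_data_info_from become _create_block, _rollback, _prune, _inference_optimize, _optimized_guard, _get_desc and _copy_data_info_from. A minimal sketch of what this means for callers, assuming a build that includes this commit:

```python
import paddle.fluid as fluid

prog = fluid.Program()

# The old public spellings are gone from Program; only the
# underscore-prefixed internals remain after this change.
assert not hasattr(prog, "create_block") and hasattr(prog, "_create_block")
assert not hasattr(prog, "optimized_guard") and hasattr(prog, "_optimized_guard")
assert not hasattr(prog, "inference_optimize") and hasattr(prog, "_inference_optimize")
```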
 paddle.fluid.Program.__init__ ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.block ArgSpec(args=['self', 'index'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.clone ArgSpec(args=['self', 'for_test'], varargs=None, keywords=None, defaults=(False,))
-paddle.fluid.Program.copy_data_info_from ArgSpec(args=['self', 'other'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.create_block ArgSpec(args=['self', 'parent_idx'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.Program.current_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.get_desc ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.global_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.inference_optimize ArgSpec(args=['self', 'export_for_deployment'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.Program.list_vars ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.optimized_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.Program.parse_from_string ArgSpec(args=['binary_str'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.prune ArgSpec(args=['self', 'targets'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Program.rollback ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Program.to_string ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.Operator.__init__ ArgSpec(args=['self', 'block', 'desc', 'type', 'inputs', 'outputs', 'attrs'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.Operator.all_attrs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
......
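
Each line in the spec file above pairs a public name with what looks like the repr of Python 2's inspect.getargspec result, which is why methods hidden behind an underscore simply drop out of the dump. A rough sketch of how one such line can be reproduced (the helper name and traversal are illustrative assumptions, not the project's actual spec-generation tooling):

```python
# Python 2 sketch: "ArgSpec(...)" above is the repr of inspect.getargspec's result.
import inspect

import paddle.fluid as fluid


def dump_signature(qualified_name, func):
    # Produces e.g.
    # paddle.fluid.Program.clone ArgSpec(args=['self', 'for_test'], varargs=None,
    #                                    keywords=None, defaults=(False,))
    return "{} {}".format(qualified_name, inspect.getargspec(func))


print(dump_signature("paddle.fluid.Program.clone", fluid.Program.clone))
```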
@@ -347,7 +347,7 @@ def _append_backward_ops_(block,
         # If the op has its own sub-block, deal with the sub-block first
         if op.has_attr("sub_block"):
             sub_block = program.block(op.block_attr_id("sub_block"))
-            grad_sub_block = program.create_block()
+            grad_sub_block = program._create_block()
             grad_sub_block._set_forward_block_idx(sub_block.idx)
             cb = _callback_lookup_(op)
             if cb is not None:
@@ -361,7 +361,7 @@ def _append_backward_ops_(block,
             _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block,
                                   no_grad_dict, grad_to_var, callbacks)
-            program.rollback()
+            program._rollback()
             grad_sub_block_list.append(grad_sub_block.desc)

         # Getting op's corresponding grad_op
......
@@ -331,7 +331,7 @@ def append_gradient_clip_ops(param_grads):
     for p, g in param_grads:
         if g is None:
             continue
-        with p.block.program.optimized_guard([p, g]):
+        with p.block.program._optimized_guard([p, g]):
             clip_attr = getattr(p, 'gradient_clip_attr', NullGradientClipAttr())
             if clip_attr is None:
                 clip_attr = NullGradientClipAttr()
@@ -346,7 +346,7 @@ def append_gradient_clip_ops(param_grads):
     for p, g in param_grads:
         if g is None:
             continue
-        with p.block.program.optimized_guard([p, g]):
+        with p.block.program._optimized_guard([p, g]):
             res.append(clip_attr._create_operators(param=p, grad=g))

     return res
......
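
The only change at call sites like the two above is the guard's name; it still just tags the ops appended inside it with the Optimize OpRole and the param/grad pair. A small self-contained sketch of the renamed guard on a toy network (the fc/mean layers and the assign "update" are placeholders chosen for illustration, not the clipping implementation):

```python
import paddle.fluid as fluid

main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    loss = fluid.layers.mean(fluid.layers.fc(input=x, size=1))
    p, g = fluid.backward.append_backward(loss)[0]

    # Ops emitted here are marked with OpRole.Optimize and
    # op_role_var = [p.name, g.name].
    with main._optimized_guard([p, g]):
        fluid.layers.assign(g, p)  # stand-in for a real parameter update
```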
@@ -126,7 +126,7 @@ class SelectCase(object):
         self.channel = channel

     def __enter__(self):
-        self.block = self.main_program.create_block()
+        self.block = self.main_program._create_block()

     def construct_op(self):
         main_program = self.helper.main_program
@@ -187,7 +187,7 @@ class SelectCase(object):
             if self.value else '')

     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.main_program.rollback()
+        self.main_program._rollback()
         if exc_type is not None:
             return False  # re-raise exception
         return True
......
@@ -935,7 +935,7 @@ class Block(object):
     Notes:
         The constructor of Block should not be invoked directly. Please
-        use `Program.create_block()` to create a block.
+        use `Program._create_block()` to create a block.

     Examples:
         .. code-block:: python
@@ -1483,7 +1483,7 @@ class Program(object):
         self._op_role_var = [var_name]

     @contextlib.contextmanager
-    def optimized_guard(self, param_and_grads):
+    def _optimized_guard(self, param_and_grads):
         """
         A with guard to set :code:`Optimization` :code:`OpRole` and
         :code:`OpRoleVar` automatically.
@@ -1496,7 +1496,7 @@ class Program(object):
         Examples:

             >>> p, g = backward(...)
-            >>> with program.optimized_guard([p,g]):
+            >>> with program._optimized_guard([p,g]):
             >>>     p = p - 0.001 * g
         """
         OpRole = core.op_proto_and_checker_maker.OpRole
@@ -1554,7 +1554,7 @@ class Program(object):
         res_str = _debug_string_(proto, throw_on_error)
         return res_str

-    def get_desc(self):
+    def _get_desc(self):
         """
         Get the C++ side of `ProgramDesc` object pointer. The C++ object is
         exposed by :code:`pybind`.
@@ -1647,7 +1647,7 @@ class Program(object):
         The two code snippets above will generate same programs.
         """
         if for_test:
-            p = self.inference_optimize(export_for_deployment=False)
+            p = self._inference_optimize(export_for_deployment=False)
         else:
             p = Program()
             p.current_block_idx = self.current_block_idx
@@ -1663,10 +1663,10 @@ class Program(object):
         p._sync_with_cpp()
         p._copy_param_info_from(self)
-        p.copy_data_info_from(self)
+        p._copy_data_info_from(self)
         return p

-    def prune(self, targets):
+    def _prune(self, targets):
         """
         Prune operators and variables which are not needed to generate
         :code:`targets`.
@@ -1717,7 +1717,7 @@ class Program(object):
         res._sync_with_cpp()
         return res

-    def inference_optimize(self, export_for_deployment=True):
+    def _inference_optimize(self, export_for_deployment=True):
         """
         This method will create a new program and do following adjustments on it:
         1. Remove all reader variables and their creator ops if exist.
@@ -1841,7 +1841,7 @@ class Program(object):
         """
         return self.blocks[self.current_block_idx]

-    def create_block(self, parent_idx=None):
+    def _create_block(self, parent_idx=None):
         """
         Create a new block with the :code:`parent_idx` and change the current block
         to new block.
@@ -1860,7 +1860,7 @@ class Program(object):
         self.blocks.append(Block(self, self.current_block_idx))
         return self.current_block()

-    def rollback(self):
+    def _rollback(self):
         """
         Exit a code block, i.e., roll back to the parent block.

         Returns:
@@ -1906,7 +1906,7 @@ class Program(object):
                 "program, with represent the same topology")

         self.global_block()._copy_param_info_from(other.global_block())

-    def copy_data_info_from(self, other):
+    def _copy_data_info_from(self, other):
         """
         Copy the information of data variables from other program.
......
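
clone(for_test=True) itself stays public; after this change it simply routes through the hidden _inference_optimize and _copy_data_info_from helpers shown above. A minimal sketch, assuming the post-rename build:

```python
import paddle.fluid as fluid

main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    y = fluid.layers.fc(input=x, size=1)

# Public API is unchanged; internally clone() now calls
# self._inference_optimize(export_for_deployment=False) and
# p._copy_data_info_from(self).
test_prog = main.clone(for_test=True)
```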
@@ -515,8 +515,8 @@ def get_inference_program(target_vars, main_program=None):
             vars.extend(var.metrics)
         else:
             vars.append(var)
-    pruned_program = main_program.prune(targets=vars)
-    inference_program = pruned_program.inference_optimize()
+    pruned_program = main_program._prune(targets=vars)
+    inference_program = pruned_program._inference_optimize()
     return inference_program
@@ -644,8 +644,8 @@ def save_inference_model(dirname,
             global_block._remove_op(i)
     copy_program.desc.flush()

-    pruned_program = copy_program.prune(targets=target_vars)
-    inference_program = pruned_program.inference_optimize(
+    pruned_program = copy_program._prune(targets=target_vars)
+    inference_program = pruned_program._inference_optimize(
         export_for_deployment=export_for_deployment)
     fetch_var_names = [v.name for v in target_vars]
......
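
For the inference-export path the sequence is unchanged apart from the underscores: prune the program down to the target variables, then strip training-only pieces. A stripped-down sketch of just that step (not the full save_inference_model flow, which also clones the program and appends feed/fetch ops):

```python
import paddle.fluid as fluid

main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
    img = fluid.layers.data(name='img', shape=[784], dtype='float32')
    pred = fluid.layers.fc(input=img, size=10, act='softmax')

# Keep only the ops needed to compute `pred`, then drop reader vars etc.
pruned = main._prune(targets=[pred])
inference = pruned._inference_optimize(export_for_deployment=True)
```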
@@ -217,10 +217,10 @@ class BlockGuard(object):
         self.main_program = main_program

     def __enter__(self):
-        self.main_program.create_block()
+        self.main_program._create_block()

     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.main_program.rollback()
+        self.main_program._rollback()
         if exc_type is not None:
             return False  # re-raise exception
         return True
......
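
BlockGuard is the pattern every control-flow construct builds on: __enter__ pushes a fresh block, __exit__ pops back to the parent. A minimal sketch of that pairing on a bare Program, matching the index behavior exercised in test_program further down:

```python
import paddle.fluid as fluid

prog = fluid.Program()
parent = prog.current_block()      # block 0, the global block

child = prog._create_block()       # push: new block becomes current
assert child.idx == 1 and child.parent_idx == parent.idx

prog._rollback()                   # pop: back to the parent block
assert prog.current_block().idx == parent.idx
```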
@@ -1008,9 +1008,9 @@ class Preprocessor(object):
     @contextlib.contextmanager
     def block(self):
         self.status = Preprocessor.IN_SUB_BLOCK
-        self.sub_block = self.main_prog.create_block()
+        self.sub_block = self.main_prog._create_block()
         yield
-        self.main_prog.rollback()
+        self.main_prog._rollback()
         self.status = Preprocessor.AFTER_SUB_BLOCK
         if not self._is_completed():
             raise RuntimeError(
......
@@ -236,7 +236,7 @@ class Optimizer(object):
             for param_and_grad in parameters_and_grads:
                 if param_and_grad[1] is None:
                     continue
-                with param_and_grad[0].block.program.optimized_guard(
+                with param_and_grad[0].block.program._optimized_guard(
                         param_and_grad), name_scope("optimizer"):
                     if param_and_grad[0].trainable is True:
                         optimize_op = self._append_optimize_op(loss.block,
@@ -580,7 +580,7 @@ class AdamOptimizer(Optimizer):
         for param, grad in param_and_grads:
             if grad is None:
                 continue
-            with param.block.program.optimized_guard([param, grad]):
+            with param.block.program._optimized_guard([param, grad]):
                 beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                                       param)
                 beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
@@ -709,7 +709,7 @@ class AdamaxOptimizer(Optimizer):
         for param, grad in parameters_and_grads:
             if grad is None:
                 continue
-            with param.block.program.optimized_guard([param, grad]):
+            with param.block.program._optimized_guard([param, grad]):
                 beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                                       param)
                 main_block.append_op(
@@ -1198,7 +1198,7 @@ class ModelAverage(Optimizer):
         for param, grad in self.params_grads:
             if grad is None:
                 continue
-            with param.block.program.optimized_guard([param, grad]):
+            with param.block.program._optimized_guard([param, grad]):
                 self._append_average_accumulate_op(param)

         self.apply_program = Program()
......
@@ -47,7 +47,7 @@ def append_regularization_ops(parameters_and_grads, regularization=None):
         if grad is None:
             params_and_grads.append((param, grad))
             continue
-        with param.block.program.optimized_guard([param, grad]):
+        with param.block.program._optimized_guard([param, grad]):
             regularization_term = None
             if param.regularizer is not None:
                 # Add variable for regularization term in grad block
......
@@ -26,7 +26,7 @@ main_program = default_startup_program()

 class TestOperator(unittest.TestCase):
     def test_error_type(self):
-        block = main_program.create_block()
+        block = main_program._create_block()
         try:
             block.append_op()
             self.assertFail()
......
@@ -28,25 +28,25 @@ class TestProgram(unittest.TestCase):
         self.assertEqual(-1, b.parent_idx)
         self.assertEqual(0, b.idx)

-        b = main_program.create_block()
+        b = main_program._create_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)

-        b = main_program.create_block()
+        b = main_program._create_block()
         self.assertEqual(2, b.idx)
         self.assertEqual(1, b.parent_idx)

-        main_program.rollback()
+        main_program._rollback()

         b = main_program.current_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)

-        b = main_program.create_block()
+        b = main_program._create_block()
         self.assertEqual(3, b.idx)
         self.assertEqual(1, b.parent_idx)

-        main_program.rollback()
+        main_program._rollback()

         b = main_program.current_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)
@@ -120,8 +120,8 @@ class TestProgram(unittest.TestCase):
         main_program = fluid.Program()
         with fluid.program_guard(main_program, startup_program):
             net()
-        no_read_program = main_program.inference_optimize()
-        keep_read_program = main_program.inference_optimize(
+        no_read_program = main_program._inference_optimize()
+        keep_read_program = main_program._inference_optimize(
             export_for_deployment=False)
         no_read_ops = no_read_program.global_block().ops
         keep_read_ops = keep_read_program.global_block().ops
......
@@ -580,7 +580,7 @@ class DistributeTranspiler(object):
             assert isinstance(origin_block, Block)
             # we put the new sub block to new block to follow the block
             # hierarchy of the original blocks
-            new_sub_block = program.create_block(lr_block.idx)
+            new_sub_block = program._create_block(lr_block.idx)

             # clone vars
             for var in origin_block.vars:
@@ -600,7 +600,7 @@ class DistributeTranspiler(object):
         # record optimize blocks and we can run them on pserver parallel
         optimize_blocks = []
         if len(lr_ops) > 0:
-            lr_decay_block = pserver_program.create_block(
+            lr_decay_block = pserver_program._create_block(
                 pserver_program.num_blocks - 1)
             optimize_blocks.append(lr_decay_block)
             for _, op in enumerate(lr_ops):
@@ -613,7 +613,7 @@ class DistributeTranspiler(object):
         grad_to_block_id = []
         pre_block_idx = pserver_program.num_blocks - 1
         for idx, opt_op in enumerate(opt_op_on_pserver):
-            per_opt_block = pserver_program.create_block(pre_block_idx)
+            per_opt_block = pserver_program._create_block(pre_block_idx)
             optimize_blocks.append(per_opt_block)
             # append grad merging ops before clip and weight decay
             # cases may like:
@@ -636,7 +636,7 @@ class DistributeTranspiler(object):
         grad_to_block_id = list(set(grad_to_block_id))
         # append global ops
         if global_ops:
-            opt_state_block = pserver_program.create_block(
+            opt_state_block = pserver_program._create_block(
                 pserver_program.num_blocks - 1)
             optimize_blocks.append(opt_state_block)
             for glb_op in global_ops:
@@ -1073,7 +1073,7 @@ class DistributeTranspiler(object):
         table_var = pserver_program.global_block().vars[self.table_name]
         prefetch_var_name_to_block_id = []
         for index in range(len(self.all_prefetch_input_vars)):
-            prefetch_block = pserver_program.create_block(optimize_block.idx)
+            prefetch_block = pserver_program._create_block(optimize_block.idx)
             trainer_ids = self.all_prefetch_input_vars[index][pserver_index]
             pserver_ids = pserver_program.global_block().create_var(
                 name=trainer_ids.name,
@@ -1131,7 +1131,7 @@ class DistributeTranspiler(object):
             if 'Param' in op.input_names and op.input("Param")[0] ==
             self.table_name
         ][0]
-        table_opt_block = pserver_program.create_block(pre_block_idx)
+        table_opt_block = pserver_program._create_block(pre_block_idx)

         if self.sync_mode:
             # create grad vars in pserver program
@@ -1194,7 +1194,7 @@ class DistributeTranspiler(object):
             persistable=True,
             type=core.VarDesc.VarType.RAW)

-        checkpoint_save_block = pserver_program.create_block(pre_block_idx)
+        checkpoint_save_block = pserver_program._create_block(pre_block_idx)
         # this 'file_path' do not be used in save lookup table variable
         checkpoint_save_block.append_op(
             type='save',
......
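
Unlike the plain push/pop use earlier, the transpiler passes an explicit parent_idx so each pserver optimize block hangs off a chosen parent rather than the current block. A small sketch of that variant (the bare Program here is illustrative; the real code chains blocks on the pserver program):

```python
import paddle.fluid as fluid

pserver_prog = fluid.Program()

# parent_idx selects the parent explicitly, as in
# pserver_program._create_block(pserver_program.num_blocks - 1) above.
first = pserver_prog._create_block(pserver_prog.num_blocks - 1)
second = pserver_prog._create_block(pserver_prog.num_blocks - 1)
assert first.idx == 1 and second.parent_idx == first.idx
```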
@@ -357,7 +357,7 @@ def _get_cfgs(input_program):
     :return: A list of ControlFlowGraph, each corresponds to a block.
     """
     ops_list = []
-    pdesc = input_program.get_desc()
+    pdesc = input_program._get_desc()
     block_desc = pdesc.block(0)
     op_size = block_desc.op_size()
......
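
The memory-optimize pass is a direct consumer of the raw ProgramDesc handle, so it picks up the renamed accessor. A minimal sketch of what _get_desc hands back, using only the calls visible in the hunk above:

```python
import paddle.fluid as fluid

prog = fluid.Program()
pdesc = prog._get_desc()      # C++ ProgramDesc exposed through pybind
block0 = pdesc.block(0)       # BlockDesc of the global block
print(block0.op_size())       # 0 ops for an empty program
```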