From 1e045cae8f220cc6e853c681912b1c1e9bf3e6ed Mon Sep 17 00:00:00 2001
From: 0x45f <23097963+0x45f@users.noreply.github.com>
Date: Thu, 17 Mar 2022 10:27:55 +0800
Subject: [PATCH] Refine io for test_mnist.py (#40496)

* for test_mnist.py

* remove comments

* use type() to replace isinstance()

* valid vars for run program OP in io.py

* open test_mnist in eager_guard for coverage
---
 python/paddle/fluid/dygraph/io.py             | 68 ++++++++-----------
 python/paddle/fluid/dygraph/jit.py            |  2 +-
 python/paddle/fluid/dygraph/layers.py         |  3 +-
 .../unittests/dygraph_to_static/test_mnist.py |  8 +++
 python/paddle/static/input.py                 |  2 +-
 5 files changed, 41 insertions(+), 42 deletions(-)
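The central io.py change below swaps the dict-style framework._dygraph_tracer().trace_op(type='run_program', ...) call for a direct _C_ops.run_program(...) call: the variable lists become positional arguments guarded by the new _valid_vars helper, and the op attributes are flattened into a (name, value, name, value, ...) tuple. A minimal sketch of the new calling convention, mirroring the patched _run_dygraph in io.py (the helper name _trace_run_program and the prepared instance/program_holder/*_vars objects are illustrative assumptions, not part of the patch):

from paddle import _C_ops
from paddle.fluid.layers.utils import _hash_with_id

def _trace_run_program(instance, program_holder, input_vars, persistable_vars,
                       output_vars, tmp_scope_vec, double_grad_vars):
    # Pick the program to trace, exactly as the patched _run_dygraph does.
    trace_program = (program_holder.infer_program
                     if instance._is_test else program_holder.train_program)
    end_op_index = program_holder.infer_program.block(0).op_size()
    # Attributes are passed as a flattened (name, value, ...) tuple.
    attrs = ('global_block', trace_program.block(0), 'start_op_index', 0,
             'end_op_index', end_op_index, 'is_test', instance._is_test,
             'program_id', _hash_with_id(trace_program, instance))
    # _valid_vars (added by this patch in io.py) substitutes a one-element
    # fake-variable list when a slot would otherwise be empty, so every
    # input/output slot of the run_program op receives a non-empty list.
    _C_ops.run_program(
        _valid_vars(input_vars), _valid_vars(persistable_vars),
        _valid_vars(output_vars), tmp_scope_vec,
        _valid_vars(double_grad_vars), *attrs)

This also removes the old fallback that stuffed a fake VarBase into double_grad_vars inside _run_dygraph itself; that responsibility now lives in _valid_vars.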
diff --git a/python/paddle/fluid/dygraph/io.py b/python/paddle/fluid/dygraph/io.py
index f58952d3036..a36164a277d 100644
--- a/python/paddle/fluid/dygraph/io.py
+++ b/python/paddle/fluid/dygraph/io.py
@@ -30,6 +30,7 @@ from paddle.fluid.layers import nn
 from paddle.fluid.layers.utils import _hash_with_id
 from paddle.fluid.dygraph.base import switch_to_static_graph
 from paddle.fluid.framework import in_dygraph_mode
+from paddle import _C_ops
 
 __all__ = ['TranslatedLayer']
 
@@ -761,6 +762,21 @@ def _construct_params_and_buffers(model_path,
     return var_dict
 
 
+def _valid_vars(vars):
+    if vars:
+        return vars
+    if framework._in_eager_mode():
+        return [
+            core.eager.Tensor(core.VarDesc.VarType.FP32, [], "Fake_var",
+                              core.VarDesc.VarType.RAW, False)
+        ]
+    else:
+        return [
+            core.VarBase(core.VarDesc.VarType.FP32, [], "Fake_var",
+                         core.VarDesc.VarType.RAW, False)
+        ]
+
+
 def _run_dygraph(instance, input, program_holder):
 
     # 1. prepare inputs, outputs, attrs
@@ -826,17 +842,12 @@ def _run_dygraph(instance, input, program_holder):
 
     # hold forward variables
     if framework._in_eager_mode():
-        tmp_scope_vec = core.eager.Tensor(
-            dtype=core.VarDesc.VarType.FP32,
-            dims=[],
-            name="program_out_scope",
-            type=core.VarDesc.VarType.STEP_SCOPES,
-            persistable=True)
+        tmp_scope_vec = [program_holder.scope]
     else:
         tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [],
                                      "program_out_scope",
                                      core.VarDesc.VarType.STEP_SCOPES, True)
-    tmp_scope_vec.value().set_scope(program_holder.scope)
+        tmp_scope_vec.value().set_scope(program_holder.scope)
 
     double_grad_vars = []
     for var_desc in program_holder.double_grad_descs:
@@ -852,41 +863,18 @@ def _run_dygraph(instance, input, program_holder):
                 var_desc.shape(), var_desc.name(), var_desc.type(), False)
         double_grad_vars.append(var)
 
-    if len(double_grad_vars) == 0:
-        if framework._in_eager_mode():
-            double_grad_vars = [
-                core.eager.Tensor(
-                    value=[1],
-                    name='Fake_var',
-                    place=framework._current_expected_place())
-            ]
-        else:
-            double_grad_vars = [
-                core.VarBase(
-                    value=[1],
-                    name='Fake_var',
-                    place=framework._current_expected_place())
-            ]
 
     # 2. run program by op
     trace_program = program_holder.infer_program if instance._is_test else program_holder.train_program
     end_op_index = program_holder.infer_program.block(0).op_size()
-    framework._dygraph_tracer().trace_op(
-        type='run_program',
-        inputs={'X': input_vars,
-                'Params': persistable_vars},
-        outputs={
-            'Out': output_vars,
-            'OutScope': tmp_scope_vec,
-            'DOut': double_grad_vars
-        },
-        attrs={
-            'global_block': trace_program.block(0),
-            'start_op_index': 0,
-            'end_op_index': end_op_index,
-            'is_test': instance._is_test,
-            'program_id': _hash_with_id(trace_program, instance)
-        })
+    attrs = ('global_block', trace_program.block(0), 'start_op_index', 0,
+             'end_op_index', end_op_index, 'is_test', instance._is_test,
+             'program_id', _hash_with_id(trace_program, instance))
+    _C_ops.run_program(
+        _valid_vars(input_vars),
+        _valid_vars(persistable_vars),
+        _valid_vars(output_vars), tmp_scope_vec,
+        _valid_vars(double_grad_vars), *attrs)
     # NOTE: [ why need set param's gradient type here ]
     # if user set sparse gradient mode, the param's gradient
     # will be SelectedRows, not LoDTensor. But tracer will just
@@ -914,8 +902,10 @@ def _run_dygraph(instance, input, program_holder):
 
 def drop_scope_if_no_grad(instance, scope_vec):
     tracer = framework._dygraph_tracer()
+    scope = scope_vec.value().get_scope() if isinstance(scope_vec, (
+        core.VarBase)) else scope_vec[0]
     if (not instance._is_test) and (not tracer._has_grad):
-        scope_vec.value().get_scope().drop_kids()
+        scope.drop_kids()
 
 
 def _run_static_graph(input, program_holder, trace_program):
diff --git a/python/paddle/fluid/dygraph/jit.py b/python/paddle/fluid/dygraph/jit.py
index b1865691b24..1e1ce3ba7e4 100644
--- a/python/paddle/fluid/dygraph/jit.py
+++ b/python/paddle/fluid/dygraph/jit.py
@@ -821,7 +821,7 @@ def save(layer, path, input_spec=None, **configs):
         for var in flatten(input_spec):
             if isinstance(var, paddle.static.InputSpec):
                 inner_input_spec.append(var)
-            elif isinstance(var, (core.VarBase, Variable)):
+            elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
                 inner_input_spec.append(
                     paddle.static.InputSpec.from_tensor(var))
             else:
diff --git a/python/paddle/fluid/dygraph/layers.py b/python/paddle/fluid/dygraph/layers.py
index 53dbf1a66b2..6957850d205 100644
--- a/python/paddle/fluid/dygraph/layers.py
+++ b/python/paddle/fluid/dygraph/layers.py
@@ -760,7 +760,8 @@ class Layer(object):
             raise KeyError("The name of buffer can not be empty.")
         elif hasattr(self, name) and name not in self._buffers:
             raise KeyError("attribute '{}' already exists.".format(name))
-        elif tensor is not None and not type(tensor) == core.VarBase:
+        elif tensor is not None and not (type(tensor) == core.VarBase or
+                                         type(tensor) == core.eager.Tensor):
             raise TypeError(
                 "The registered buffer should be a core.VarBase, but received {}.".
                 format(type(tensor).__name__))
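The jit.py and layers.py hunks above are companion changes: type checks that previously accepted only the legacy core.VarBase now also accept core.eager.Tensor. A small usage sketch of the relaxed Layer.register_buffer check (assumes a Paddle build from this era, where _test_eager_guard is available):

import paddle
from paddle.fluid.framework import _test_eager_guard

layer = paddle.nn.Linear(2, 2)
# Legacy dygraph mode: the buffer is a core.VarBase, accepted as before.
layer.register_buffer("mask", paddle.ones([2]))
# Eager mode: the buffer is a core.eager.Tensor, which the widened
# type check in Layer.register_buffer now also accepts.
with _test_eager_guard():
    layer.register_buffer("eager_mask", paddle.ones([2]))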
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py
index cac64c73913..2b8307461b8 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py
@@ -27,6 +27,7 @@ from paddle.fluid.dygraph.nn import Conv2D, Linear, Pool2D
 from paddle.fluid.optimizer import AdamOptimizer
 from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
 from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
+from paddle.fluid.framework import _test_eager_guard
 
 from predictor_utils import PredictorTools
 
@@ -155,6 +156,13 @@ class TestMNISTWithToStatic(TestMNIST):
             np.allclose(dygraph_loss, static_loss),
             msg='dygraph is {}\n static_res is \n{}'.format(dygraph_loss,
                                                             static_loss))
+        with _test_eager_guard():
+            dygraph_loss = self.train_dygraph()
+            static_loss = self.train_static()
+            self.assertTrue(
+                np.allclose(dygraph_loss, static_loss),
+                msg='dygraph is {}\n static_res is \n{}'.format(dygraph_loss,
+                                                                static_loss))
 
     def test_mnist_declarative_cpu_vs_mkldnn(self):
         dygraph_loss_cpu = self.train_dygraph()
diff --git a/python/paddle/static/input.py b/python/paddle/static/input.py
index f06c45cc369..7c0c71951aa 100644
--- a/python/paddle/static/input.py
+++ b/python/paddle/static/input.py
@@ -193,7 +193,7 @@ class InputSpec(object):
             print(x_spec)  # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x)
 
         """
-        if isinstance(tensor, (Variable, core.VarBase)):
+        if isinstance(tensor, (Variable, core.VarBase, core.eager.Tensor)):
             return cls(tensor.shape, tensor.dtype, name or tensor.name)
         else:
             raise ValueError(
--
GitLab
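As an end note, the input.py hunk applies the same widening to InputSpec.from_tensor, so an input spec can now be derived from an eager tensor as well. A sketch under the same assumptions (the printed repr follows the docstring shown in the diff and may differ slightly across versions):

import paddle
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    x = paddle.ones([2, 2], dtype='float32')
    x_spec = paddle.static.InputSpec.from_tensor(x, name='x')
    print(x_spec)  # e.g. InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x)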