Unverified · Commit 1e045cae authored by 0x45f, committed by GitHub

Refine io for test_mnist.py (#40496)

* for test_mnist.py

* remove comments

* use type() in place of isinstance()

* valid vars for run program OP in io.py

* run test_mnist under eager_guard for coverage
Parent 4c01763c
@@ -30,6 +30,7 @@ from paddle.fluid.layers import nn
 from paddle.fluid.layers.utils import _hash_with_id
 from paddle.fluid.dygraph.base import switch_to_static_graph
 from paddle.fluid.framework import in_dygraph_mode
+from paddle import _C_ops

 __all__ = ['TranslatedLayer']
@@ -761,6 +762,21 @@ def _construct_params_and_buffers(model_path,
     return var_dict


+def _valid_vars(vars):
+    if vars:
+        return vars
+    if framework._in_eager_mode():
+        return [
+            core.eager.Tensor(core.VarDesc.VarType.FP32, [], "Fake_var",
+                              core.VarDesc.VarType.RAW, False)
+        ]
+    else:
+        return [
+            core.VarBase(core.VarDesc.VarType.FP32, [], "Fake_var",
+                         core.VarDesc.VarType.RAW, False)
+        ]
+
+
 def _run_dygraph(instance, input, program_holder):
     # 1. prepare inputs, outputs, attrs
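Not part of the diff: the helper above appears to exist because the run_program OP cannot take an empty variable list. A minimal sketch of the intended behavior (some_tensor is a hypothetical dygraph tensor):

    vs = _valid_vars([some_tensor])   # non-empty list -> returned unchanged
    ph = _valid_vars([])              # empty list -> one "Fake_var" placeholder
    print(ph[0].name)                 # "Fake_var"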
@@ -826,12 +842,7 @@ def _run_dygraph(instance, input, program_holder):
     # hold forward variables
     if framework._in_eager_mode():
-        tmp_scope_vec = core.eager.Tensor(
-            dtype=core.VarDesc.VarType.FP32,
-            dims=[],
-            name="program_out_scope",
-            type=core.VarDesc.VarType.STEP_SCOPES,
-            persistable=True)
+        tmp_scope_vec = [program_holder.scope]
     else:
         tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [],
                                      "program_out_scope",
@@ -852,41 +863,18 @@ def _run_dygraph(instance, input, program_holder):
                                    var_desc.shape(),
                                    var_desc.name(), var_desc.type(), False)
             double_grad_vars.append(var)
-    if len(double_grad_vars) == 0:
-        if framework._in_eager_mode():
-            double_grad_vars = [
-                core.eager.Tensor(
-                    value=[1],
-                    name='Fake_var',
-                    place=framework._current_expected_place())
-            ]
-        else:
-            double_grad_vars = [
-                core.VarBase(
-                    value=[1],
-                    name='Fake_var',
-                    place=framework._current_expected_place())
-            ]

     # 2. run program by op
     trace_program = program_holder.infer_program if instance._is_test else program_holder.train_program
     end_op_index = program_holder.infer_program.block(0).op_size()
-    framework._dygraph_tracer().trace_op(
-        type='run_program',
-        inputs={'X': input_vars,
-                'Params': persistable_vars},
-        outputs={
-            'Out': output_vars,
-            'OutScope': tmp_scope_vec,
-            'DOut': double_grad_vars
-        },
-        attrs={
-            'global_block': trace_program.block(0),
-            'start_op_index': 0,
-            'end_op_index': end_op_index,
-            'is_test': instance._is_test,
-            'program_id': _hash_with_id(trace_program, instance)
-        })
+    attrs = ('global_block', trace_program.block(0), 'start_op_index', 0,
+             'end_op_index', end_op_index, 'is_test', instance._is_test,
+             'program_id', _hash_with_id(trace_program, instance))
+    _C_ops.run_program(
+        _valid_vars(input_vars),
+        _valid_vars(persistable_vars),
+        _valid_vars(output_vars), tmp_scope_vec,
+        _valid_vars(double_grad_vars), *attrs)
     # NOTE: [ why need set param's gradient type here ]
     # if user set sparse gradient mode, the param's gradient
     # will be SelectedRows, not LoDTensor. But tracer will just
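Not part of the diff: the _C_ops entry points take attributes as a flat name/value sequence rather than the dict that trace_op consumed. A sketch of the correspondence (values illustrative):

    dict_attrs = {'start_op_index': 0, 'end_op_index': 8, 'is_test': True}   # trace_op style
    flat_attrs = ('start_op_index', 0, 'end_op_index', 8, 'is_test', True)   # _C_ops style
    # _C_ops.run_program(...) receives the pairs positionally via *flat_attrs.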
@@ -914,8 +902,10 @@ def _run_dygraph(instance, input, program_holder):
 def drop_scope_if_no_grad(instance, scope_vec):
     tracer = framework._dygraph_tracer()
+    scope = scope_vec.value().get_scope() if isinstance(scope_vec, (
+        core.VarBase)) else scope_vec[0]
     if (not instance._is_test) and (not tracer._has_grad):
-        scope_vec.value().get_scope().drop_kids()
+        scope.drop_kids()


 def _run_static_graph(input, program_holder, trace_program):
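Not part of the diff: after this change scope_vec has two possible shapes, so the dispatch above reads as (a restatement, assuming the eager branch stores the raw scope in a list):

    if isinstance(scope_vec, core.VarBase):
        scope = scope_vec.value().get_scope()   # legacy path: STEP_SCOPES variable
    else:
        scope = scope_vec[0]                    # eager path: [program_holder.scope]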
@@ -821,7 +821,7 @@ def save(layer, path, input_spec=None, **configs):
         for var in flatten(input_spec):
             if isinstance(var, paddle.static.InputSpec):
                 inner_input_spec.append(var)
-            elif isinstance(var, (core.VarBase, Variable)):
+            elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
                 inner_input_spec.append(
                     paddle.static.InputSpec.from_tensor(var))
             else:
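Not part of the diff: a hedged usage sketch, assuming paddle.jit.save converts forward with the given input_spec; the layer, path, and tensor are illustrative:

    import paddle

    layer = paddle.nn.Linear(8, 2)
    x = paddle.randn([4, 8], dtype='float32')   # core.eager.Tensor under eager mode
    # With this change, x is converted via InputSpec.from_tensor(x) instead of
    # falling through to the error branch.
    paddle.jit.save(layer, '/tmp/linear_example', input_spec=[x])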
@@ -760,7 +760,8 @@ class Layer(object):
             raise KeyError("The name of buffer can not be empty.")
         elif hasattr(self, name) and name not in self._buffers:
             raise KeyError("attribute '{}' already exists.".format(name))
-        elif tensor is not None and not type(tensor) == core.VarBase:
+        elif tensor is not None and not (type(tensor) == core.VarBase or
+                                         type(tensor) == core.eager.Tensor):
             raise TypeError(
                 "The registered buffer should be a core.VarBase, but received {}.".
                 format(type(tensor).__name__))
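Not part of the diff: an illustrative register_buffer call (buffer name and shape arbitrary) showing what the widened exact-type check now accepts:

    import paddle

    layer = paddle.nn.Linear(4, 4)
    # In legacy dygraph this tensor is a core.VarBase; under an eager guard it
    # is a core.eager.Tensor. Both now pass the type check above.
    layer.register_buffer('running_mean', paddle.zeros([4]))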
@@ -27,6 +27,7 @@ from paddle.fluid.dygraph.nn import Conv2D, Linear, Pool2D
 from paddle.fluid.optimizer import AdamOptimizer
 from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
 from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
+from paddle.fluid.framework import _test_eager_guard

 from predictor_utils import PredictorTools
@@ -155,6 +156,13 @@ class TestMNISTWithToStatic(TestMNIST):
             np.allclose(dygraph_loss, static_loss),
             msg='dygraph is {}\n static_res is \n{}'.format(dygraph_loss,
                                                             static_loss))
+        with _test_eager_guard():
+            dygraph_loss = self.train_dygraph()
+            static_loss = self.train_static()
+            self.assertTrue(
+                np.allclose(dygraph_loss, static_loss),
+                msg='dygraph is {}\n static_res is \n{}'.format(dygraph_loss,
+                                                                static_loss))

     def test_mnist_declarative_cpu_vs_mkldnn(self):
         dygraph_loss_cpu = self.train_dygraph()
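Not part of the diff: a sketch of the added pattern, as it appears above; the guard flips the test body into eager mode so the same dygraph-vs-static comparison also covers eager execution:

    with _test_eager_guard():
        pass  # re-run the identical train_dygraph()/train_static() checks here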
@@ -193,7 +193,7 @@ class InputSpec(object):
             print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x)
         """
-        if isinstance(tensor, (Variable, core.VarBase)):
+        if isinstance(tensor, (Variable, core.VarBase, core.eager.Tensor)):
            return cls(tensor.shape, tensor.dtype, name or tensor.name)
         else:
             raise ValueError(
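Not part of the diff: a usage sketch mirroring the docstring example above, now also valid when x is an eager Tensor:

    import paddle

    x = paddle.ones([2, 2], dtype='float32')   # core.eager.Tensor under eager mode
    x_spec = paddle.static.InputSpec.from_tensor(x, name='x')
    print(x_spec)  # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x)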