Unverified commit 1e045cae, authored by 0x45f and committed by GitHub

Refine io for test_mnist.py (#40496)

* refine io for test_mnist.py

* remove comments

* use type() instead of isinstance()

* validate vars for the run_program OP in io.py

* run test_mnist under eager_guard for coverage
Parent 4c01763c
python/paddle/fluid/dygraph/io.py
@@ -30,6 +30,7 @@ from paddle.fluid.layers import nn
 from paddle.fluid.layers.utils import _hash_with_id
 from paddle.fluid.dygraph.base import switch_to_static_graph
 from paddle.fluid.framework import in_dygraph_mode
+from paddle import _C_ops
 
 __all__ = ['TranslatedLayer']
@@ -761,6 +762,21 @@ def _construct_params_and_buffers(model_path,
     return var_dict
 
 
+def _valid_vars(vars):
+    if vars:
+        return vars
+    if framework._in_eager_mode():
+        return [
+            core.eager.Tensor(core.VarDesc.VarType.FP32, [], "Fake_var",
+                              core.VarDesc.VarType.RAW, False)
+        ]
+    else:
+        return [
+            core.VarBase(core.VarDesc.VarType.FP32, [], "Fake_var",
+                         core.VarDesc.VarType.RAW, False)
+        ]
+
+
 def _run_dygraph(instance, input, program_holder):
 
     # 1. prepare inputs, outputs, attrs
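The new `_valid_vars` helper exists because the `run_program` OP cannot take an empty variable list for its input, parameter, output, or double-grad slots; when a slot would be empty, a single throwaway FP32 variable named "Fake_var" is substituted. A minimal sketch of the idea, with `make_placeholder` as a hypothetical stand-in for the `core.VarBase`/`core.eager.Tensor` construction above:

```python
def _valid_vars_sketch(vars, make_placeholder):
    """Return `vars` unchanged if non-empty, else a one-element placeholder list."""
    if vars:
        return vars
    # The OP rejects empty slots, so hand it a dummy variable instead.
    return [make_placeholder("Fake_var")]
```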
@@ -826,17 +842,12 @@ def _run_dygraph(instance, input, program_holder):
 
     # hold forward variables
     if framework._in_eager_mode():
-        tmp_scope_vec = core.eager.Tensor(
-            dtype=core.VarDesc.VarType.FP32,
-            dims=[],
-            name="program_out_scope",
-            type=core.VarDesc.VarType.STEP_SCOPES,
-            persistable=True)
+        tmp_scope_vec = [program_holder.scope]
     else:
         tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [],
                                      "program_out_scope",
                                      core.VarDesc.VarType.STEP_SCOPES, True)
-    tmp_scope_vec.value().set_scope(program_holder.scope)
+        tmp_scope_vec.value().set_scope(program_holder.scope)
 
     double_grad_vars = []
     for var_desc in program_holder.double_grad_descs:
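In eager mode the forward scope now travels as a bare one-element Python list instead of a STEP_SCOPES tensor, so consumers must accept both shapes (as `drop_scope_if_no_grad` does further down). A hedged sketch of the unwrapping logic, assuming the legacy wrapper exposes `.value().get_scope()` as above:

```python
def _get_forward_scope(scope_vec):
    # Hypothetical helper mirroring the two representations in this diff.
    if isinstance(scope_vec, list):
        # Eager mode: the scope object sits directly in a list.
        return scope_vec[0]
    # Legacy dygraph: unwrap the STEP_SCOPES VarBase.
    return scope_vec.value().get_scope()
```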
@@ -852,41 +863,18 @@ def _run_dygraph(instance, input, program_holder):
             var_desc.shape(),
             var_desc.name(), var_desc.type(), False)
         double_grad_vars.append(var)
-    if len(double_grad_vars) == 0:
-        if framework._in_eager_mode():
-            double_grad_vars = [
-                core.eager.Tensor(
-                    value=[1],
-                    name='Fake_var',
-                    place=framework._current_expected_place())
-            ]
-        else:
-            double_grad_vars = [
-                core.VarBase(
-                    value=[1],
-                    name='Fake_var',
-                    place=framework._current_expected_place())
-            ]
 
     # 2. run program by op
     trace_program = program_holder.infer_program if instance._is_test else program_holder.train_program
     end_op_index = program_holder.infer_program.block(0).op_size()
-    framework._dygraph_tracer().trace_op(
-        type='run_program',
-        inputs={'X': input_vars,
-                'Params': persistable_vars},
-        outputs={
-            'Out': output_vars,
-            'OutScope': tmp_scope_vec,
-            'DOut': double_grad_vars
-        },
-        attrs={
-            'global_block': trace_program.block(0),
-            'start_op_index': 0,
-            'end_op_index': end_op_index,
-            'is_test': instance._is_test,
-            'program_id': _hash_with_id(trace_program, instance)
-        })
+    attrs = ('global_block', trace_program.block(0), 'start_op_index', 0,
+             'end_op_index', end_op_index, 'is_test', instance._is_test,
+             'program_id', _hash_with_id(trace_program, instance))
+    _C_ops.run_program(
+        _valid_vars(input_vars),
+        _valid_vars(persistable_vars),
+        _valid_vars(output_vars), tmp_scope_vec,
+        _valid_vars(double_grad_vars), *attrs)
 
     # NOTE: [ why need set param's gradient type here ]
     # if user set sparse gradient mode, the param's gradient
     # will be SelectedRows, not LoDTensor. But tracer will just
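The dict-based `trace_op` call is replaced by a direct call into the C++ operator through `_C_ops.run_program`, which takes positional variable lists followed by the attributes flattened into one alternating name/value tuple. A small sketch of that flattening convention (the helper name is hypothetical):

```python
def flatten_attrs(attr_dict):
    # Flatten {'is_test': True, ...} into ('is_test', True, ...), the
    # alternating name/value form that _C_ops calls expand with *attrs.
    flat = []
    for name, value in attr_dict.items():
        flat.extend((name, value))
    return tuple(flat)

attrs = flatten_attrs({'start_op_index': 0, 'end_op_index': 8, 'is_test': True})
# e.g. _C_ops.run_program(inputs, params, outputs, scope_vec, dout, *attrs)
```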
@@ -914,8 +902,10 @@ def _run_dygraph(instance, input, program_holder):
 
 def drop_scope_if_no_grad(instance, scope_vec):
     tracer = framework._dygraph_tracer()
+    scope = scope_vec.value().get_scope() if isinstance(scope_vec, (
+        core.VarBase)) else scope_vec[0]
     if (not instance._is_test) and (not tracer._has_grad):
-        scope_vec.value().get_scope().drop_kids()
+        scope.drop_kids()
 
 
 def _run_static_graph(input, program_holder, trace_program):
......
python/paddle/fluid/dygraph/jit.py
@@ -821,7 +821,7 @@ def save(layer, path, input_spec=None, **configs):
         for var in flatten(input_spec):
             if isinstance(var, paddle.static.InputSpec):
                 inner_input_spec.append(var)
-            elif isinstance(var, (core.VarBase, Variable)):
+            elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
                 inner_input_spec.append(
                     paddle.static.InputSpec.from_tensor(var))
             else:
......
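With `core.eager.Tensor` added to the accepted types, `paddle.jit.save` can derive an `InputSpec` from a concrete eager tensor passed in `input_spec`. A hedged usage sketch, assuming a build where eager mode is enabled and that `save` converts the plain Layer via `to_static` internally:

```python
import paddle

layer = paddle.nn.Linear(4, 2)
example = paddle.randn([1, 4])  # a concrete tensor, not an InputSpec
# save() turns the tensor into an InputSpec via InputSpec.from_tensor().
paddle.jit.save(layer, path='./linear', input_spec=[example])
```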
python/paddle/fluid/dygraph/layers.py
@@ -760,7 +760,8 @@ class Layer(object):
             raise KeyError("The name of buffer can not be empty.")
         elif hasattr(self, name) and name not in self._buffers:
             raise KeyError("attribute '{}' already exists.".format(name))
-        elif tensor is not None and not type(tensor) == core.VarBase:
+        elif tensor is not None and not (type(tensor) == core.VarBase or
+                                         type(tensor) == core.eager.Tensor):
            raise TypeError(
                 "The registered buffer should be a core.VarBase, but received {}.".
                 format(type(tensor).__name__))
......
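Per the commit message, the buffer check deliberately uses exact `type()` comparison rather than `isinstance()`; assuming the usual layout where parameter classes subclass the tensor classes, this keeps a parameter from slipping in as a buffer. A self-contained illustration of the difference:

```python
class Tensor:
    pass

class Parameter(Tensor):  # stand-in for a parameter class subclassing the tensor class
    pass

p = Parameter()
print(isinstance(p, Tensor))  # True  -- subclass passes, parameter accepted
print(type(p) == Tensor)      # False -- exact match, parameter rejected
```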
python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py
@@ -27,6 +27,7 @@ from paddle.fluid.dygraph.nn import Conv2D, Linear, Pool2D
 from paddle.fluid.optimizer import AdamOptimizer
 from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
 from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
+from paddle.fluid.framework import _test_eager_guard
 
 from predictor_utils import PredictorTools
@@ -155,6 +156,13 @@ class TestMNISTWithToStatic(TestMNIST):
             np.allclose(dygraph_loss, static_loss),
             msg='dygraph is {}\n static_res is \n{}'.format(dygraph_loss,
                                                             static_loss))
+        with _test_eager_guard():
+            dygraph_loss = self.train_dygraph()
+            static_loss = self.train_static()
+            self.assertTrue(
+                np.allclose(dygraph_loss, static_loss),
+                msg='dygraph is {}\n static_res is \n{}'.format(dygraph_loss,
+                                                                static_loss))
 
     def test_mnist_declarative_cpu_vs_mkldnn(self):
         dygraph_loss_cpu = self.train_dygraph()
......
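The test now repeats the dygraph-vs-static comparison under `_test_eager_guard`, the internal context manager that switches execution into eager mode, so both code paths are covered by the same assertions. The pattern in isolation:

```python
from paddle.fluid.framework import _test_eager_guard

def check_both_modes(check_fn):
    check_fn()                 # legacy dygraph mode
    with _test_eager_guard():  # same checks again in eager mode
        check_fn()
```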
python/paddle/static/input.py
@@ -193,7 +193,7 @@ class InputSpec(object):
             print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x)
         """
-        if isinstance(tensor, (Variable, core.VarBase)):
+        if isinstance(tensor, (Variable, core.VarBase, core.eager.Tensor)):
             return cls(tensor.shape, tensor.dtype, name or tensor.name)
         else:
             raise ValueError(
......
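`InputSpec.from_tensor` only reads `shape`, `dtype`, and `name`, so widening the `isinstance` check makes it work uniformly for static `Variable`s, `VarBase`, and eager `Tensor`s. A short usage example adapted from the docstring above:

```python
import paddle
from paddle.static import InputSpec

x = paddle.ones([2, 2], dtype='float32')
x_spec = InputSpec.from_tensor(x, name='x')
print(x_spec)  # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x)
```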