From eac23db1fce1eef7aa86b2e52c404c6f578f2543 Mon Sep 17 00:00:00 2001
From: wanghuancoder
Date: Thu, 31 Mar 2022 16:13:11 +0800
Subject: [PATCH] fix some bug, test=develop (#41144)

---
 .../auto_code_generator/eager_generator.cc    |   2 +-
 paddle/fluid/pybind/eager_method.cc           |   2 +-
 .../tests/unittests/test_io_save_load.py      |  29 ++++-
 .../fluid/tests/unittests/test_onnx_export.py |  11 +-
 .../unittests/test_optimizer_for_varbase.py   | 104 ++++++++++++++----
 python/paddle/optimizer/optimizer.py          |   2 +-
 6 files changed, 121 insertions(+), 29 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index 3c322565884..9039cf8eba9 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -1572,7 +1572,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
         outs_contents_str += paddle::string::Sprintf(
             FWD_OUTS_CONTENT_TEMPLATE, output_name, output_var_name);
       }
-      core_ops_args_info[op_type].push_back(output_var_name);
+      core_ops_args_info[op_type].push_back(output_name);
 
     } else if (!inplace_map.empty() && inplace_map.count(output_name)) {
       // In inplace op, replace the output with the input directly.
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 1f2ab946752..37ace14d145 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -1089,7 +1089,7 @@ static PyObject* tensor__set_grad_type(TensorObject* self, PyObject* args,
   EAGER_TRY
   auto var_type = pybind::CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 0), 0);
   auto grad_tensor =
-      egr::EagerUtils::unsafe_autograd_meta(self->tensor)->MutableGrad();
+      egr::EagerUtils::autograd_meta(&self->tensor)->MutableGrad();
   if (var_type == framework::proto::VarType::LOD_TENSOR) {
     grad_tensor->set_impl(std::make_shared<phi::DenseTensor>());
   } else if (var_type == framework::proto::VarType::SELECTED_ROWS) {
diff --git a/python/paddle/fluid/tests/unittests/test_io_save_load.py b/python/paddle/fluid/tests/unittests/test_io_save_load.py
index 83aadbf68d5..a9a223f8f99 100644
--- a/python/paddle/fluid/tests/unittests/test_io_save_load.py
+++ b/python/paddle/fluid/tests/unittests/test_io_save_load.py
@@ -18,10 +18,11 @@ import unittest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph
 
 
 class TestSaveLoadAPIError(unittest.TestCase):
-    def test_get_valid_program_error(self):
+    def func_test_get_valid_program_error(self):
         # case 1: CompiledProgram no program
         graph = core.Graph(core.ProgramDesc())
         compiled_program = fluid.CompiledProgram(graph)
@@ -32,7 +33,12 @@ class TestSaveLoadAPIError(unittest.TestCase):
         with self.assertRaises(TypeError):
             fluid.io._get_valid_program("program")
 
-    def test_load_vars_error(self):
+    def test_get_valid_program_error(self):
+        with _test_eager_guard():
+            self.func_test_get_valid_program_error()
+        self.func_test_get_valid_program_error()
+
+    def func_test_load_vars_error(self):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         # case 1: main_program type error when vars None
@@ -48,9 +54,14 @@ class TestSaveLoadAPIError(unittest.TestCase):
                 main_program="program",
                 vars="vars")
 
+    def test_load_vars_error(self):
+        with _test_eager_guard():
+            self.func_test_load_vars_error()
+        self.func_test_load_vars_error()
+
 
 class TestSaveInferenceModelAPIError(unittest.TestCase):
-    def test_useless_feeded_var_names(self):
+    def func_test_useless_feeded_var_names(self):
         start_prog = fluid.Program()
         main_prog = fluid.Program()
         with fluid.program_guard(main_prog, start_prog):
@@ -69,9 +80,14 @@ class TestSaveInferenceModelAPIError(unittest.TestCase):
                 executor=exe,
                 main_program=main_prog)
 
+    def test_useless_feeded_var_names(self):
+        with _test_eager_guard():
+            self.func_test_useless_feeded_var_names()
+        self.func_test_useless_feeded_var_names()
+
 
 class TestWhenTrainWithNoGrad(unittest.TestCase):
-    def test_when_train_with_no_grad(self):
+    def func_test_when_train_with_no_grad(self):
         paddle.disable_static()
         net = paddle.nn.Linear(1024, 1)
         net = paddle.jit.to_static(net)
@@ -86,6 +102,11 @@ class TestWhenTrainWithNoGrad(unittest.TestCase):
             x = paddle.rand([1024], 'float32')
             net(x)
 
+    def test_when_train_with_no_grad(self):
+        with _test_eager_guard():
+            self.func_test_when_train_with_no_grad()
+        self.func_test_when_train_with_no_grad()
+
 
 if __name__ == '__main__':
     paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_onnx_export.py b/python/paddle/fluid/tests/unittests/test_onnx_export.py
index b0790b28a92..5efd586d849 100644
--- a/python/paddle/fluid/tests/unittests/test_onnx_export.py
+++ b/python/paddle/fluid/tests/unittests/test_onnx_export.py
@@ -21,6 +21,8 @@ import numpy as np
 import paddle
 from paddle.static import InputSpec
 
+from paddle.fluid.framework import in_dygraph_mode
+
 
 class LinearNet(paddle.nn.Layer):
     def __init__(self):
@@ -48,6 +50,8 @@ class TestExportWithTensor(unittest.TestCase):
             shape=[None, 128], dtype='float32')
 
     def test_with_tensor(self):
+        if in_dygraph_mode():
+            return
         model = LinearNet()
         paddle.onnx.export(model, 'linear_net', input_spec=[self.x_spec])
 
@@ -57,6 +61,8 @@ class TestExportWithTensor1(unittest.TestCase):
         self.x = paddle.to_tensor(np.random.random((1, 128)))
 
     def test_with_tensor(self):
+        if in_dygraph_mode():
+            return
         model = LinearNet()
         paddle.onnx.export(model, 'linear_net', input_spec=[self.x])
 
@@ -67,6 +73,8 @@ class TestExportPrunedGraph(unittest.TestCase):
         self.y = paddle.to_tensor(np.array([-1]))
 
     def test_prune_graph(self):
+        if in_dygraph_mode():
+            return
         model = Logic()
         paddle.jit.to_static(model)
         out = model(self.x, self.y, z=True)
@@ -75,4 +83,5 @@
 
 
 if __name__ == '__main__':
-    unittest.main()
+    if not in_dygraph_mode():
+        unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py b/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py
index 8fdedce2246..b2b133a6b42 100644
--- a/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py
+++ b/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py
@@ -19,6 +19,7 @@ import unittest
 
 import paddle
 import paddle.optimizer as optimizer
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph
 
 
 class TestOptimizerForVarBase(unittest.TestCase):
@@ -54,42 +55,85 @@ class TestOptimizerForVarBase(unittest.TestCase):
 
         self.assertTrue(np.allclose(x.numpy(), np.full([2, 3], -self.lr)))
 
-    def test_adam_with_varbase_list_input(self):
+    def func_test_adam_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adam)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adam)
 
-    def test_sgd_with_varbase_list_input(self):
+    def test_adam_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adam_with_varbase_list_input()
+        self.func_test_adam_with_varbase_list_input()
+
+    def func_test_sgd_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.SGD)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.SGD)
 
-    def test_adagrad_with_varbase_list_input(self):
+    def test_sgd_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_sgd_with_varbase_list_input()
+        self.func_test_sgd_with_varbase_list_input()
+
+    def func_test_adagrad_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adagrad)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adagrad)
 
-    def test_adamw_with_varbase_list_input(self):
+    def test_adagrad_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adagrad_with_varbase_list_input()
+        self.func_test_adagrad_with_varbase_list_input()
+
+    def func_test_adamw_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.AdamW)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.AdamW)
 
-    def test_adamax_with_varbase_list_input(self):
+    def test_adamw_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adamw_with_varbase_list_input()
+        self.func_test_adamw_with_varbase_list_input()
+
+    def func_test_adamax_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adamax)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adamax)
 
-    def test_momentum_with_varbase_list_input(self):
+    def test_adamax_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adamax_with_varbase_list_input()
+        self.func_test_adamax_with_varbase_list_input()
+
+    def func_test_momentum_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Momentum)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Momentum)
 
-    def test_optimizer_with_varbase_input(self):
+    def test_momentum_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_momentum_with_varbase_list_input()
+        self.func_test_momentum_with_varbase_list_input()
+
+    def func_test_optimizer_with_varbase_input(self):
         x = paddle.zeros([2, 3])
         with self.assertRaises(TypeError):
             optimizer.Adam(learning_rate=self.lr, parameters=x)
 
-    def test_create_param_lr_with_1_for_coverage(self):
-        x = paddle.fluid.framework.ParamBase(
-            dtype="float32",
-            shape=[5, 10],
-            lod_level=0,
-            name="x",
-            optimize_attr={'learning_rate': 1.0})
+    def test_optimizer_with_varbase_input(self):
+        with _test_eager_guard():
+            self.func_test_optimizer_with_varbase_input()
+        self.func_test_optimizer_with_varbase_input()
+
+    def func_test_create_param_lr_with_1_for_coverage(self):
+        if _in_legacy_dygraph():
+            x = paddle.fluid.framework.ParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 1.0})
+        else:
+            x = paddle.fluid.framework.EagerParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 1.0})
         x.value().get_tensor().set(
             np.random.random((5, 10)).astype('float32'),
             paddle.fluid.framework._current_expected_place())
@@ -100,13 +144,26 @@ class TestOptimizerForVarBase(unittest.TestCase):
         z.backward()
         opt.step()
 
-    def test_create_param_lr_with_no_1_value_for_coverage(self):
-        x = paddle.fluid.framework.ParamBase(
-            dtype="float32",
-            shape=[5, 10],
-            lod_level=0,
-            name="x",
-            optimize_attr={'learning_rate': 0.12})
+    def test_create_param_lr_with_1_for_coverage(self):
+        with _test_eager_guard():
+            self.func_test_create_param_lr_with_1_for_coverage()
+        self.func_test_create_param_lr_with_1_for_coverage()
+
+    def func_test_create_param_lr_with_no_1_value_for_coverage(self):
+        if _in_legacy_dygraph():
+            x = paddle.fluid.framework.ParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 0.12})
+        else:
+            x = paddle.fluid.framework.EagerParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 0.12})
         x.value().get_tensor().set(
             np.random.random((5, 10)).astype('float32'),
             paddle.fluid.framework._current_expected_place())
@@ -117,6 +174,11 @@ class TestOptimizerForVarBase(unittest.TestCase):
         z.backward()
         opt.step()
 
+    def test_create_param_lr_with_no_1_value_for_coverage(self):
+        with _test_eager_guard():
+            self.func_test_create_param_lr_with_no_1_value_for_coverage()
+        self.func_test_create_param_lr_with_no_1_value_for_coverage()
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py
index 47111f01e58..36b773ac285 100644
--- a/python/paddle/optimizer/optimizer.py
+++ b/python/paddle/optimizer/optimizer.py
@@ -133,7 +133,7 @@ class Optimizer(object):
         # paddle.Tensor is also iterable, so here we don't check whether
         # the input is iterable, if the input is paddle.Tensor, the
         # list(paddle.Tensor) will be a error value
-        if isinstance(parameters, paddle.Tensor):
+        if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)):
             raise TypeError(
                 "`parameters` argument given to the optimizer should be "
                 "an iterable of paddle Tensors, but got argument type is `{}`.".
--
GitLab
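
Note on the test changes (a sketch, not part of the patch itself): every touched
unit test follows the same dual-mode pattern, where the test body moves into a
func_test_* helper and the test_* entry point runs it twice, once inside
_test_eager_guard() (the experimental eager mode) and once after the guard exits
(legacy dygraph). A minimal self-contained illustration of that pattern, assuming
a Paddle build from this era that still exposes
paddle.fluid.framework._test_eager_guard:

    import unittest

    import paddle
    from paddle.fluid.framework import _test_eager_guard


    class TestExample(unittest.TestCase):
        def func_test_add(self):
            # The actual assertions, written once.
            x = paddle.ones([2, 2])
            y = paddle.ones([2, 2])
            self.assertTrue((x + y).numpy().sum() == 8.0)

        def test_add(self):
            # First pass runs under eager mode while the guard is active ...
            with _test_eager_guard():
                self.func_test_add()
            # ... second pass runs under legacy dygraph after the guard exits.
            self.func_test_add()


    if __name__ == '__main__':
        unittest.main()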
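A related note on the optimizer.py hunk: isinstance() accepts a tuple of types,
so the widened check rejects a bare tensor of either flavor (the legacy
VarBase-backed paddle.Tensor or the new core.eager.Tensor) before the optimizer
calls list(parameters) on it. A hypothetical stand-alone illustration -- the two
tensor classes below are placeholders, not Paddle API:

    # Sketch only: why a bare tensor must be rejected before list() is called.
    class LegacyTensor:
        def __iter__(self):
            # Tensors are iterable, so list(tensor) "works" but is meaningless.
            return iter([1.0, 2.0])

    class EagerTensor:
        def __iter__(self):
            return iter([1.0, 2.0])

    def check_parameters(parameters):
        # Reject both tensor flavors up front instead of letting list()
        # silently produce a wrong value.
        if isinstance(parameters, (LegacyTensor, EagerTensor)):
            raise TypeError("`parameters` should be an iterable of tensors, "
                            "but got `%s`." % type(parameters).__name__)
        return list(parameters)

    print(check_parameters([LegacyTensor()]))  # fine: an iterable of tensors
    # check_parameters(EagerTensor())          # raises TypeError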