Unverified Commit eac23db1 authored by W wanghuancoder, committed by GitHub

fix some bug, test=develop (#41144)

Parent 033b2748
@@ -1572,7 +1572,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
       outs_contents_str += paddle::string::Sprintf(
           FWD_OUTS_CONTENT_TEMPLATE, output_name, output_var_name);
     }
-    core_ops_args_info[op_type].push_back(output_var_name);
+    core_ops_args_info[op_type].push_back(output_name);
   } else if (!inplace_map.empty() && inplace_map.count(output_name)) {
     // In inplace op, replace the output with the input directly.
...
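The one-line change above records the op's declared output name, instead of the generated output variable name, in core_ops_args_info, so the table matches the argument names of the generated forward function. A minimal sketch of the intent, using hypothetical values for a scale op (the real table is built by the eager code generator):

# Hypothetical sketch: core_ops_args_info maps an op type to the argument
# names exposed by its generated forward API. "Out" is the op's declared
# output name; "Out_0" stands for an internal codegen variable name that
# should not leak into the recorded argument list.
core_ops_args_info = {}
op_type, output_name, output_var_name = "scale", "Out", "Out_0"
core_ops_args_info.setdefault(op_type, []).append(output_name)  # the fixed line
assert core_ops_args_info["scale"] == ["Out"]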
@@ -1089,7 +1089,7 @@ static PyObject* tensor__set_grad_type(TensorObject* self, PyObject* args,
   EAGER_TRY
   auto var_type = pybind::CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 0), 0);
   auto grad_tensor =
-      egr::EagerUtils::unsafe_autograd_meta(self->tensor)->MutableGrad();
+      egr::EagerUtils::autograd_meta(&self->tensor)->MutableGrad();
   if (var_type == framework::proto::VarType::LOD_TENSOR) {
     grad_tensor->set_impl(std::make_shared<phi::DenseTensor>());
   } else if (var_type == framework::proto::VarType::SELECTED_ROWS) {
...
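The pybind change swaps unsafe_autograd_meta, which assumes the tensor's AutogradMeta already exists, for autograd_meta(&self->tensor), which creates it on demand. A minimal sketch of that distinction, using hypothetical Python stand-ins rather than the real C++ helpers:

# Hypothetical stand-ins for the two EagerUtils accessors: the "unsafe"
# variant requires the autograd meta to exist already, while the safe one
# lazily creates it, so _set_grad_type no longer trips on a tensor whose
# grad state has not been initialized yet.
class Tensor:
    def __init__(self):
        self._meta = None

def unsafe_autograd_meta(t):
    assert t._meta is not None, "autograd meta not initialized"
    return t._meta

def autograd_meta(t):
    if t._meta is None:
        t._meta = {"grad": None}
    return t._meta

t = Tensor()
autograd_meta(t)                   # ok: creates the meta on first use
# unsafe_autograd_meta(Tensor())   # would assert, as before the fix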
@@ -18,10 +18,11 @@ import unittest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph


 class TestSaveLoadAPIError(unittest.TestCase):
-    def test_get_valid_program_error(self):
+    def func_test_get_valid_program_error(self):
         # case 1: CompiledProgram no program
         graph = core.Graph(core.ProgramDesc())
         compiled_program = fluid.CompiledProgram(graph)
@@ -32,7 +33,12 @@ class TestSaveLoadAPIError(unittest.TestCase):
         with self.assertRaises(TypeError):
             fluid.io._get_valid_program("program")

-    def test_load_vars_error(self):
+    def test_get_valid_program_error(self):
+        with _test_eager_guard():
+            self.func_test_get_valid_program_error()
+        self.func_test_get_valid_program_error()
+
+    def func_test_load_vars_error(self):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         # case 1: main_program type error when vars None
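The same conversion is applied to every test touched below: the body moves into a func_test_* helper, and the unittest entry point runs it twice, once under _test_eager_guard() (eager dygraph) and once in legacy dygraph mode. A self-contained sketch of the pattern with a hypothetical test case:

import unittest
from paddle.fluid.framework import _test_eager_guard

class TestExample(unittest.TestCase):
    # The original test body becomes a plain helper...
    def func_test_something(self):
        self.assertTrue(True)

    # ...and the test_* entry point exercises it in both dygraph modes.
    def test_something(self):
        with _test_eager_guard():
            self.func_test_something()
        self.func_test_something()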
@@ -48,9 +54,14 @@ class TestSaveLoadAPIError(unittest.TestCase):
                               main_program="program",
                               vars="vars")

+    def test_load_vars_error(self):
+        with _test_eager_guard():
+            self.func_test_load_vars_error()
+        self.func_test_load_vars_error()
+

 class TestSaveInferenceModelAPIError(unittest.TestCase):
-    def test_useless_feeded_var_names(self):
+    def func_test_useless_feeded_var_names(self):
         start_prog = fluid.Program()
         main_prog = fluid.Program()
         with fluid.program_guard(main_prog, start_prog):
@@ -69,9 +80,14 @@ class TestSaveInferenceModelAPIError(unittest.TestCase):
                 executor=exe,
                 main_program=main_prog)

+    def test_useless_feeded_var_names(self):
+        with _test_eager_guard():
+            self.func_test_useless_feeded_var_names()
+        self.func_test_useless_feeded_var_names()
+

 class TestWhenTrainWithNoGrad(unittest.TestCase):
-    def test_when_train_with_no_grad(self):
+    def func_test_when_train_with_no_grad(self):
         paddle.disable_static()
         net = paddle.nn.Linear(1024, 1)
         net = paddle.jit.to_static(net)
@@ -86,6 +102,11 @@ class TestWhenTrainWithNoGrad(unittest.TestCase):
         x = paddle.rand([1024], 'float32')
         net(x)

+    def test_when_train_with_no_grad(self):
+        with _test_eager_guard():
+            self.func_test_when_train_with_no_grad()
+        self.func_test_when_train_with_no_grad()
+

 if __name__ == '__main__':
     paddle.enable_static()
...
@@ -21,6 +21,8 @@ import numpy as np
 import paddle
 from paddle.static import InputSpec
+from paddle.fluid.framework import in_dygraph_mode
+

 class LinearNet(paddle.nn.Layer):
     def __init__(self):
@@ -48,6 +50,8 @@ class TestExportWithTensor(unittest.TestCase):
                                   shape=[None, 128], dtype='float32')

     def test_with_tensor(self):
+        if in_dygraph_mode():
+            return
         model = LinearNet()
         paddle.onnx.export(model, 'linear_net', input_spec=[self.x_spec])
@@ -57,6 +61,8 @@ class TestExportWithTensor1(unittest.TestCase):
         self.x = paddle.to_tensor(np.random.random((1, 128)))

     def test_with_tensor(self):
+        if in_dygraph_mode():
+            return
         model = LinearNet()
         paddle.onnx.export(model, 'linear_net', input_spec=[self.x])
@@ -67,6 +73,8 @@ class TestExportPrunedGraph(unittest.TestCase):
         self.y = paddle.to_tensor(np.array([-1]))

     def test_prune_graph(self):
+        if in_dygraph_mode():
+            return
         model = Logic()
         paddle.jit.to_static(model)
         out = model(self.x, self.y, z=True)
@@ -75,4 +83,5 @@ class TestExportPrunedGraph(unittest.TestCase):
 if __name__ == '__main__':
-    unittest.main()
+    if not in_dygraph_mode():
+        unittest.main()
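In this ONNX export test file the guard goes the other way: rather than running twice, each test returns early when in_dygraph_mode() reports the new eager mode, which paddle.onnx.export did not yet support at the time of this commit. A short sketch of the skip pattern with a hypothetical test:

import unittest
from paddle.fluid.framework import in_dygraph_mode

class TestExportSketch(unittest.TestCase):
    def test_export(self):
        # Bail out under eager dygraph mode, mirroring the guard above;
        # the body only runs in legacy dygraph mode.
        if in_dygraph_mode():
            return
        # ... call paddle.onnx.export here ...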
@@ -19,6 +19,7 @@ import unittest
 import paddle
 import paddle.optimizer as optimizer
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph


 class TestOptimizerForVarBase(unittest.TestCase):
@@ -54,42 +55,85 @@ class TestOptimizerForVarBase(unittest.TestCase):
         self.assertTrue(np.allclose(x.numpy(), np.full([2, 3], -self.lr)))

-    def test_adam_with_varbase_list_input(self):
+    def func_test_adam_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adam)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adam)

-    def test_sgd_with_varbase_list_input(self):
+    def test_adam_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adam_with_varbase_list_input()
+        self.func_test_adam_with_varbase_list_input()
+
+    def func_test_sgd_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.SGD)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.SGD)

-    def test_adagrad_with_varbase_list_input(self):
+    def test_sgd_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_sgd_with_varbase_list_input()
+        self.func_test_sgd_with_varbase_list_input()
+
+    def func_test_adagrad_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adagrad)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adagrad)

-    def test_adamw_with_varbase_list_input(self):
+    def test_adagrad_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adagrad_with_varbase_list_input()
+        self.func_test_adagrad_with_varbase_list_input()
+
+    def func_test_adamw_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.AdamW)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.AdamW)

-    def test_adamax_with_varbase_list_input(self):
+    def test_adamw_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adamw_with_varbase_list_input()
+        self.func_test_adamw_with_varbase_list_input()
+
+    def func_test_adamax_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adamax)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adamax)

-    def test_momentum_with_varbase_list_input(self):
+    def test_adamax_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adamax_with_varbase_list_input()
+        self.func_test_adamax_with_varbase_list_input()
+
+    def func_test_momentum_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Momentum)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Momentum)

-    def test_optimizer_with_varbase_input(self):
+    def test_momentum_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_momentum_with_varbase_list_input()
+        self.func_test_momentum_with_varbase_list_input()
+
+    def func_test_optimizer_with_varbase_input(self):
         x = paddle.zeros([2, 3])
         with self.assertRaises(TypeError):
             optimizer.Adam(learning_rate=self.lr, parameters=x)

-    def test_create_param_lr_with_1_for_coverage(self):
-        x = paddle.fluid.framework.ParamBase(
-            dtype="float32",
-            shape=[5, 10],
-            lod_level=0,
-            name="x",
-            optimize_attr={'learning_rate': 1.0})
+    def test_optimizer_with_varbase_input(self):
+        with _test_eager_guard():
+            self.func_test_optimizer_with_varbase_input()
+        self.func_test_optimizer_with_varbase_input()
+
+    def func_test_create_param_lr_with_1_for_coverage(self):
+        if _in_legacy_dygraph():
+            x = paddle.fluid.framework.ParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 1.0})
+        else:
+            x = paddle.fluid.framework.EagerParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 1.0})
         x.value().get_tensor().set(
             np.random.random((5, 10)).astype('float32'),
             paddle.fluid.framework._current_expected_place())
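The branch added above exists because the parameter class differs between the two dygraph stacks: ParamBase backs legacy VarBase parameters, while EagerParamBase backs eager Tensors. A minimal sketch of the selection logic, factored into a helper (the helper name is illustrative, not part of the test):

from paddle.fluid import framework
from paddle.fluid.framework import _in_legacy_dygraph

def make_param(lr):
    # Pick the parameter class that matches the active dygraph mode.
    cls = (framework.ParamBase
           if _in_legacy_dygraph() else framework.EagerParamBase)
    return cls(dtype="float32", shape=[5, 10], lod_level=0, name="x",
               optimize_attr={'learning_rate': lr})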
@@ -100,13 +144,26 @@ class TestOptimizerForVarBase(unittest.TestCase):
         z.backward()
         opt.step()

-    def test_create_param_lr_with_no_1_value_for_coverage(self):
-        x = paddle.fluid.framework.ParamBase(
-            dtype="float32",
-            shape=[5, 10],
-            lod_level=0,
-            name="x",
-            optimize_attr={'learning_rate': 0.12})
+    def test_create_param_lr_with_1_for_coverage(self):
+        with _test_eager_guard():
+            self.func_test_create_param_lr_with_1_for_coverage()
+        self.func_test_create_param_lr_with_1_for_coverage()
+
+    def func_test_create_param_lr_with_no_1_value_for_coverage(self):
+        if _in_legacy_dygraph():
+            x = paddle.fluid.framework.ParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 0.12})
+        else:
+            x = paddle.fluid.framework.EagerParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 0.12})
         x.value().get_tensor().set(
             np.random.random((5, 10)).astype('float32'),
             paddle.fluid.framework._current_expected_place())
@@ -117,6 +174,11 @@ class TestOptimizerForVarBase(unittest.TestCase):
         z.backward()
         opt.step()

+    def test_create_param_lr_with_no_1_value_for_coverage(self):
+        with _test_eager_guard():
+            self.func_test_create_param_lr_with_no_1_value_for_coverage()
+        self.func_test_create_param_lr_with_no_1_value_for_coverage()
+

 if __name__ == "__main__":
     unittest.main()
@@ -133,7 +133,7 @@ class Optimizer(object):
             # paddle.Tensor is also iterable, so here we don't check whether
             # the input is iterable, if the input is paddle.Tensor, the
             # list(paddle.Tensor) will be a error value
-            if isinstance(parameters, paddle.Tensor):
+            if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)):
                 raise TypeError(
                     "`parameters` argument given to the optimizer should be "
                     "an iterable of paddle Tensors, but got argument type is `{}`.".
...
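With the widened isinstance check, passing a single eager Tensor (core.eager.Tensor) as `parameters` now raises the same TypeError as a legacy paddle.Tensor, instead of being iterated element by element. A short usage sketch, mirroring func_test_optimizer_with_varbase_input above:

import paddle

x = paddle.zeros([2, 3])  # a single Tensor, not an iterable of parameters
try:
    paddle.optimizer.Adam(learning_rate=0.01, parameters=x)
except TypeError as e:
    print(e)  # the optimizer expects an iterable of paddle Tensors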