From 56c43ccde296e120846b6833932d965a7c56fe0e Mon Sep 17 00:00:00 2001
From: hong <43953930+phlrain@users.noreply.github.com>
Date: Mon, 21 Mar 2022 12:12:53 +0800
Subject: [PATCH] Merge some test bug (#40543)

* switch eager mode and change it
* set default is eager
* set default is eager
* fix error; test=develop
* fix some error; test=develop
* update
* upd
* update code; test=develop
* update
* fix some bug; test=develop
* fix bug; test=develop
* fix bug; test=develop
* fix bug; test=develop
* fix error; test=develop
* format; test=develop

Co-authored-by: JiabinYang <360788950@qq.com>
---
 .../auto_code_generator/eager_generator.cc    | 10 +++++++-
 paddle/fluid/eager/grad_tensor_holder.cc      |  2 +-
 paddle/fluid/pybind/eager_utils.cc            |  2 ++
 python/paddle/fluid/dygraph/base.py           |  8 +++----
 .../fluid/dygraph/varbase_patch_methods.py    |  3 ++-
 python/paddle/fluid/framework.py              |  3 ++-
 python/paddle/fluid/tests/test_detection.py   |  2 ++
 .../test_distribution_expfamily.py            |  4 ++++
 .../tests/unittests/distribution/test_kl.py   |  4 ++++
 .../sequence/test_sequence_concat.py          |  2 ++
 .../fluid/tests/unittests/test_Tensor_type.py |  1 +
 .../fluid/tests/unittests/test_adam_op.py     |  1 +
 .../fluid/tests/unittests/test_base_layer.py  |  1 +
 .../tests/unittests/test_create_parameter.py  |  2 ++
 .../fluid/tests/unittests/test_ctc_align.py   |  2 ++
 .../fluid/tests/unittests/test_diff_op.py     |  1 +
 .../unittests/test_dygraph_weight_norm.py     |  1 +
 .../tests/unittests/test_exponential_op.py    |  1 +
 .../fluid/tests/unittests/test_fmin_op.py     |  5 ++++
 .../fluid/tests/unittests/test_initializer.py |  1 +
 .../fluid/tests/unittests/test_inner.py       |  1 +
 .../tests/unittests/test_io_save_load.py      |  1 +
 .../fluid/tests/unittests/test_isclose_op.py  |  4 ++++
 .../fluid/tests/unittests/test_log_softmax.py |  1 +
 .../tests/unittests/test_lr_scheduler.py      |  1 +
 .../fluid/tests/unittests/test_mean_iou.py    |  2 ++
 .../fluid/tests/unittests/test_minus_op.py    |  2 ++
 .../fluid/tests/unittests/test_norm_all.py    |  1 +
 .../tests/unittests/test_paddle_save_load.py  | 23 +++++++++++++------
 .../fluid/tests/unittests/test_renorm_op.py   |  3 ++-
 .../test_retinanet_detection_output.py        |  2 ++
 .../tests/unittests/test_smooth_l1_loss.py    |  1 +
 .../fluid/tests/unittests/test_tile_op.py     |  3 ++-
 .../fluid/tests/unittests/test_var_base.py    |  1 +
 python/paddle/hapi/model.py                   |  5 ++--
 python/paddle/metric/metrics.py               | 14 +++++------
 python/paddle/tensor/logic.py                 |  2 +-
 python/paddle/tensor/manipulation.py          |  3 +++
 python/paddle/tensor/math.py                  |  2 +-
 39 files changed, 100 insertions(+), 28 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index b8d59e8dd8b..df2cdc35626 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -2032,7 +2032,15 @@ static std::string GenerateSingleOpBase(
   const char* ATTRS_TEMPLATE = "  auto& %s = this->attr_map_;\n";
   std::string grad_attrs_str =
       paddle::string::Sprintf(ATTRS_TEMPLATE, attrs_name);
-
+  if (fwd_op_type == "cast") {
+    // switch in/out dtype
+    const char* CAST_GRAD =
+        "  auto temp_type = %s[\"in_dtype\"];\n"
+        "  %s[\"in_dtype\"] = %s[\"out_dtype\"];\n"
+        "  %s[\"out_dtype\"] = temp_type;\n";
+    grad_attrs_str += paddle::string::Sprintf(CAST_GRAD, attrs_name, attrs_name,
+                                              attrs_name, attrs_name);
+  }
   // Handle dynamic grad attributes
   grad_attrs_str += HandleDynamicGradAttributes(fwd_op_type, attrs_name);
   generated_grad_function_body += grad_attrs_str;
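For context on the cast-specific branch above: the backward of y = cast(x, out_dtype) is itself a cast of the incoming gradient back to in_dtype, which is why the generated grad node swaps the two dtype attributes. A minimal dygraph sketch of that behaviour (illustrative only, not part of the patch):

import paddle

x = paddle.to_tensor([1.0, 2.0], dtype='float32', stop_gradient=False)
y = paddle.cast(x, 'float64')  # forward: in_dtype=float32, out_dtype=float64
y.sum().backward()             # the grad op is cast with the two dtypes swapped
print(x.grad.dtype)            # the gradient arrives back as float32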
diff --git a/paddle/fluid/eager/grad_tensor_holder.cc b/paddle/fluid/eager/grad_tensor_holder.cc
index 163d25e85ce..038ad09aa4d 100644
--- a/paddle/fluid/eager/grad_tensor_holder.cc
+++ b/paddle/fluid/eager/grad_tensor_holder.cc
@@ -93,7 +93,7 @@ void GradTensorHolder::add(size_t slot_id, size_t rank,
       // Create new tensor->impl and fill it with 1.0
       if (t.defined()) {
         // Fill 1.0
-        buffer_[slot_id][rank] = paddle::experimental::ones_like(t);
+        buffer_[slot_id][rank] = paddle::experimental::ones_like(t, t.dtype());
       }
     }
   }
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 355b4076a0b..6817fa4bf04 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -62,6 +62,8 @@ int TensorDtype2NumpyDtype(phi::DataType dtype) {
       return pybind11::detail::npy_api::NPY_INT32_;
     case phi::DataType::INT64:
       return pybind11::detail::npy_api::NPY_INT64_;
+    case phi::DataType::BFLOAT16:
+      return pybind11::detail::NPY_UINT16_;
     case phi::DataType::FLOAT16:
       return pybind11::detail::NPY_FLOAT16_;
     case phi::DataType::BFLOAT16:
diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/fluid/dygraph/base.py
index 94399828585..b4c5a36d288 100644
--- a/python/paddle/fluid/dygraph/base.py
+++ b/python/paddle/fluid/dygraph/base.py
@@ -612,7 +612,7 @@ def grad(outputs,

     if no_grad_vars is None:
         no_grad_vars = []
-    elif isinstance(no_grad_vars, core.VarBase):
+    elif isinstance(no_grad_vars, (core.VarBase, core.eager.Tensor)):
         no_grad_vars = [no_grad_vars]
     elif isinstance(no_grad_vars, core.eager.Tensor):
         no_grad_vars = [no_grad_vars]
@@ -718,13 +718,13 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
             y.shape  # [3L, 2L]

     """
-    support_type = (list, tuple, np.ndarray, core.VarBase, framework.Variable,
-                    core.Tensor, core.LoDTensor)
+    support_type = (list, tuple, np.ndarray, core.eager.Tensor, core.VarBase,
+                    framework.Variable, core.Tensor, core.LoDTensor)
     if not isinstance(value, support_type):
         raise TypeError(
             "The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s."
             % (support_type, type(value)))
-    if isinstance(value, (core.VarBase, framework.Variable)):
+    if isinstance(value, (core.eager.Tensor, core.VarBase, framework.Variable)):
         return value
     elif isinstance(value, (core.Tensor, core.LoDTensor)):
         return core.VarBase(value)
diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index 2b67a202972..af30b2b2444 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -28,6 +28,7 @@ from .math_op_patch import monkey_patch_math_varbase
 from .parallel import scale_loss
 from paddle.fluid.data_feeder import convert_dtype, _PADDLE_DTYPE_2_NUMPY_DTYPE
 import paddle.utils.deprecated as deprecated
+from paddle import _C_ops


 class TensorHookRemoveHelper(object):
@@ -782,7 +783,7 @@ def monkey_patch_varbase():

     @framework.dygraph_only
     def clone(self):
-        return _C_ops_.assign(self)
+        return _C_ops.assign(self)

     @framework.dygraph_only
     def value(self):
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index d0a94238a7a..fb787215d91 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -316,7 +316,8 @@ def _dygraph_not_support_(func):

 def _dygraph_only_(func):
     def __impl__(*args, **kwargs):
-        assert in_dygraph_mode(
+        assert (
+            in_dygraph_mode() or _in_eager_mode()
         ), "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." % func.__name__
         return func(*args, **kwargs)

diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py
index 9348b0b50a1..c4504550920 100644
--- a/python/paddle/fluid/tests/test_detection.py
+++ b/python/paddle/fluid/tests/test_detection.py
@@ -886,6 +886,7 @@ class TestDistributeFpnProposals(LayerTest):
                 refer_level=4,
                 refer_scale=224,
                 rois_num=rois_num_dy)
+            print(type(multi_rois_dy))
             output_dy = multi_rois_dy + [restore_ind_dy] + rois_num_per_level_dy
             output_dy_np = []
             for output in output_dy:
@@ -973,4 +974,5 @@ class TestBoxDecoderAndAssign(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py
index cc2e14d6d6c..341ec852c52 100644
--- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py
+++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py
@@ -50,3 +50,7 @@ class TestExponentialFamilyException(unittest.TestCase):
     def test_entropy_exception(self):
         with self.assertRaises(NotImplementedError):
             paddle.distribution.ExponentialFamily.entropy(self.dist)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/distribution/test_kl.py b/python/paddle/fluid/tests/unittests/distribution/test_kl.py
index a1413722446..55358380c8b 100644
--- a/python/paddle/fluid/tests/unittests/distribution/test_kl.py
+++ b/python/paddle/fluid/tests/unittests/distribution/test_kl.py
@@ -112,3 +112,7 @@ class TestKLExpfamilyExpFamily(unittest.TestCase):
             kl._kl_expfamily_expfamily(self.p, self.q),
             rtol=config.RTOL.get(config.DEFAULT_DTYPE),
             atol=config.ATOL.get(config.DEFAULT_DTYPE))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py
index 737c085dde6..34b6f6dc8e5 100644
--- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py
+++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py
@@ -20,6 +20,7 @@ import sys
 sys.path.append("../")

 from op_test import OpTest
+import paddle
 from paddle import fluid


@@ -115,4 +116,5 @@ class TestSequenceConcatOpError(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_Tensor_type.py b/python/paddle/fluid/tests/unittests/test_Tensor_type.py
index 59395b94279..f1427d29782 100644
--- a/python/paddle/fluid/tests/unittests/test_Tensor_type.py
+++ b/python/paddle/fluid/tests/unittests/test_Tensor_type.py
@@ -39,6 +39,7 @@ class TensorTypeTest(unittest.TestCase):

         tensorx = paddle.tensor.logic.Tensor(inx)
         typex_str = str(type(tensorx))
+        expectx = "<class 'paddle.Tensor'>"
         self.assertEqual((typex_str == expectx), True)

diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py
index ecac22553cb..d05c9a3c313 100644
--- a/python/paddle/fluid/tests/unittests/test_adam_op.py
+++ b/python/paddle/fluid/tests/unittests/test_adam_op.py
@@ -1202,4 +1202,5 @@ class TestMultiTensorAdam(unittest.TestCase):


 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_base_layer.py b/python/paddle/fluid/tests/unittests/test_base_layer.py
index b440e745b10..789cfa82658 100644
--- a/python/paddle/fluid/tests/unittests/test_base_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_base_layer.py
@@ -451,4 +451,5 @@ class TestLayerTo(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_create_parameter.py b/python/paddle/fluid/tests/unittests/test_create_parameter.py
index 763fb64816c..199558acd4e 100644
--- a/python/paddle/fluid/tests/unittests/test_create_parameter.py
+++ b/python/paddle/fluid/tests/unittests/test_create_parameter.py
@@ -18,6 +18,7 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 from paddle.fluid import ParamAttr, initializer
+import paddle


 class TestCreateParameterError(unittest.TestCase):
@@ -50,4 +51,5 @@ class TestCreateParameterError(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_ctc_align.py b/python/paddle/fluid/tests/unittests/test_ctc_align.py
index f5934debfd7..ffc5bc184ef 100644
--- a/python/paddle/fluid/tests/unittests/test_ctc_align.py
+++ b/python/paddle/fluid/tests/unittests/test_ctc_align.py
@@ -20,6 +20,7 @@ import numpy as np
 from op_test import OpTest
 from test_softmax_op import stable_softmax
 import paddle.fluid as fluid
+import paddle


 def CTCAlign(input, lod, blank, merge_repeated, padding=0, input_length=None):
@@ -229,4 +230,5 @@ class BadInputTestCTCAlignr(unittest.TestCase):


 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_diff_op.py b/python/paddle/fluid/tests/unittests/test_diff_op.py
index 1ae780f488d..4a96827bd7c 100644
--- a/python/paddle/fluid/tests/unittests/test_diff_op.py
+++ b/python/paddle/fluid/tests/unittests/test_diff_op.py
@@ -211,4 +211,5 @@ class TestDiffOpPreAppendAxis(TestDiffOp):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py b/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py
index f95546f15f0..27d82fcc890 100644
--- a/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py
+++ b/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py
@@ -190,4 +190,5 @@ class TestDygraphRemoveWeightNorm(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_exponential_op.py b/python/paddle/fluid/tests/unittests/test_exponential_op.py
index ccbc0a16763..7a3ae203be6 100644
--- a/python/paddle/fluid/tests/unittests/test_exponential_op.py
+++ b/python/paddle/fluid/tests/unittests/test_exponential_op.py
@@ -209,4 +209,5 @@ class TestExponentialAPI(unittest.TestCase):


 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_fmin_op.py b/python/paddle/fluid/tests/unittests/test_fmin_op.py
index 5cdf096be67..7231823c375 100644
--- a/python/paddle/fluid/tests/unittests/test_fmin_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fmin_op.py
@@ -189,3 +189,8 @@ class TestElementwiseFmin2Op(OpTest):
         """test_check_grad_ingore_y"""
         self.check_grad(
             ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
+
+
+if __name__ == "__main__":
+    paddle.enable_static()
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_initializer.py b/python/paddle/fluid/tests/unittests/test_initializer.py
index bff10c9c4ca..8dc822c69b2 100644
--- a/python/paddle/fluid/tests/unittests/test_initializer.py
+++ b/python/paddle/fluid/tests/unittests/test_initializer.py
@@ -1025,4 +1025,5 @@ class TestDiracInitializer3(TestDiracInitializer1):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_inner.py b/python/paddle/fluid/tests/unittests/test_inner.py
index de9decd0b89..ff9f15ebbfc 100644
--- a/python/paddle/fluid/tests/unittests/test_inner.py
+++ b/python/paddle/fluid/tests/unittests/test_inner.py
@@ -163,4 +163,5 @@ class TestMultiplyError(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_io_save_load.py b/python/paddle/fluid/tests/unittests/test_io_save_load.py
index 89ca28510b9..83aadbf68d5 100644
--- a/python/paddle/fluid/tests/unittests/test_io_save_load.py
+++ b/python/paddle/fluid/tests/unittests/test_io_save_load.py
@@ -88,4 +88,5 @@ class TestWhenTrainWithNoGrad(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_isclose_op.py b/python/paddle/fluid/tests/unittests/test_isclose_op.py
index aa39284d113..2bb58d7c574 100644
--- a/python/paddle/fluid/tests/unittests/test_isclose_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isclose_op.py
@@ -210,6 +210,9 @@ class TestIscloseOpFloat64(TestIscloseOp):
         self.atol = np.array([0]).astype("float64")
         self.equal_nan = False

+    def test_check_output(self):
+        self.check_output()
+

 class TestIscloseOpLargeDimInput(TestIscloseOp):
     def set_args(self):
@@ -222,4 +225,5 @@ class TestIscloseOpLargeDimInput(TestIscloseOp):


 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_log_softmax.py b/python/paddle/fluid/tests/unittests/test_log_softmax.py
index 16f954708d4..423eeaf3ada 100644
--- a/python/paddle/fluid/tests/unittests/test_log_softmax.py
+++ b/python/paddle/fluid/tests/unittests/test_log_softmax.py
@@ -175,4 +175,5 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):


 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_lr_scheduler.py b/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
index 6d94144fc77..60dd4948f99 100644
--- a/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
+++ b/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
@@ -555,4 +555,5 @@ class TestLRScheduler(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_mean_iou.py b/python/paddle/fluid/tests/unittests/test_mean_iou.py
index e2e118ac9e3..4e89a9034a3 100644
--- a/python/paddle/fluid/tests/unittests/test_mean_iou.py
+++ b/python/paddle/fluid/tests/unittests/test_mean_iou.py
@@ -19,6 +19,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
+import paddle


 def compute_mean_iou(predictions, labels, num_classes, in_wrongs, in_corrects,
@@ -129,4 +130,5 @@ class TestMeanIOUOpError(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_minus_op.py b/python/paddle/fluid/tests/unittests/test_minus_op.py
index 54253b17b96..461ff6a9273 100644
--- a/python/paddle/fluid/tests/unittests/test_minus_op.py
+++ b/python/paddle/fluid/tests/unittests/test_minus_op.py
@@ -17,6 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle


 class TestMinusOp(OpTest):
@@ -36,4 +37,5 @@ class TestMinusOp(OpTest):


 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_norm_all.py b/python/paddle/fluid/tests/unittests/test_norm_all.py
index 575bc653618..ef912699455 100644
--- a/python/paddle/fluid/tests/unittests/test_norm_all.py
+++ b/python/paddle/fluid/tests/unittests/test_norm_all.py
@@ -588,4 +588,5 @@ class API_NormTest(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py
index 9e0cf6ddef2..8945d35c131 100644
--- a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py
+++ b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py
@@ -315,7 +315,9 @@ class TestSaveLoadAny(unittest.TestCase):
             paddle.save(tensor, path)
             t_dygraph = paddle.load(path)
             np_dygraph = paddle.load(path, return_numpy=True)
-            self.assertTrue(isinstance(t_dygraph, paddle.fluid.core.VarBase))
+            self.assertTrue(
+                isinstance(t_dygraph, (paddle.fluid.core.VarBase,
+                                       paddle.fluid.core.eager.Tensor)))
             self.assertTrue(np.array_equal(tensor.numpy(), np_dygraph))
             self.assertTrue(np.array_equal(tensor.numpy(), t_dygraph.numpy()))
             paddle.enable_static()
@@ -685,27 +687,34 @@ class TestSaveLoadAny(unittest.TestCase):
                     np.array(v), np.array(load_tensor2['k2'][k])))
         self.assertTrue(load_tensor2['epoch'] == 123)

-        self.assertTrue(isinstance(load_tensor3[0], fluid.core.VarBase))
+        self.assertTrue(
+            isinstance(load_tensor3[0], (fluid.core.VarBase,
+                                         fluid.core.eager.Tensor)))
         self.assertTrue(np.array_equal(load_tensor3[0].numpy(), obj3[0]))
-        self.assertTrue(isinstance(load_tensor3[1], fluid.core.VarBase))
+        self.assertTrue(
+            isinstance(load_tensor3[1], (fluid.core.VarBase,
+                                         fluid.core.eager.Tensor)))
         self.assertTrue(np.array_equal(load_tensor3[1].numpy(), obj3[1]))

         for k, v in state_dict.items():
             self.assertTrue(
-                isinstance(load_tensor3[2]["state_dict"][k],
-                           fluid.core.VarBase))
+                isinstance(load_tensor3[2]["state_dict"][k], (
+                    fluid.core.VarBase, fluid.core.eager.Tensor)))
             self.assertTrue(
                 np.array_equal(load_tensor3[2]["state_dict"][k].numpy(),
                                np.array(v)))

         for k, v in state_dict.items():
             self.assertTrue(
-                isinstance(load_tensor3[2]["opt"][k], fluid.core.VarBase))
+                isinstance(load_tensor3[2]["opt"][k], (
+                    fluid.core.VarBase, fluid.core.eager.Tensor)))
             self.assertTrue(
                 np.array_equal(load_tensor3[2]["opt"][k].numpy(), np.array(v)))

-        self.assertTrue(isinstance(load_tensor4[0], fluid.core.VarBase))
+        self.assertTrue(
+            isinstance(load_tensor4[0], (fluid.core.VarBase,
+                                         fluid.core.eager.Tensor)))
         self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0]))

         load_array1 = paddle.load(path1, return_numpy=True)
diff --git a/python/paddle/fluid/tests/unittests/test_renorm_op.py b/python/paddle/fluid/tests/unittests/test_renorm_op.py
index 3ea2002a978..e00a892cf71 100644
--- a/python/paddle/fluid/tests/unittests/test_renorm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_renorm_op.py
@@ -54,7 +54,7 @@ class TestRenormAPI(unittest.TestCase):
     def test_dygraph_api(self):
         self.input_data()
         # case axis none
-        with fluid.dygraph.guard():
+        with fluid.dygraph.guard(fluid.CPUPlace()):
             input = [[[2.0, 2, -2], [3, 0.3, 3]], [[2, -8, 2], [3.1, 3.7, 3]]]
             x = paddle.to_tensor(input, stop_gradient=False)
             y = paddle.renorm(x, 1.0, 2, 2.05)
@@ -94,4 +94,5 @@ class TestRenormAPI(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py b/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py
index ca324b4a8fd..1bfc1b00aa8 100644
--- a/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py
+++ b/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py
@@ -23,6 +23,7 @@ from test_multiclass_nms_op import iou
 from test_multiclass_nms_op import nms
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+import paddle


 def multiclass_nms(prediction, class_num, keep_top_k, nms_threshold):
@@ -518,4 +519,5 @@ class TestRetinanetDetectionOutOpError(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py
index 9a97f57aaae..74409c86710 100644
--- a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py
@@ -178,4 +178,5 @@ class SmoothL1Loss(unittest.TestCase):


 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_tile_op.py b/python/paddle/fluid/tests/unittests/test_tile_op.py
index b0f065a26a0..8359141f309 100644
--- a/python/paddle/fluid/tests/unittests/test_tile_op.py
+++ b/python/paddle/fluid/tests/unittests/test_tile_op.py
@@ -22,7 +22,7 @@ import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard


-# Situation 1: repeat_times is a list (without tensor)
+#Situation 1: repeat_times is a list (without tensor)
 class TestTileOpRank1(OpTest):
     def setUp(self):
         self.op_type = "tile"
@@ -248,4 +248,5 @@ class TestTileAPI(unittest.TestCase):


 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py
index dbd40c349bb..57a7f94bedc 100644
--- a/python/paddle/fluid/tests/unittests/test_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_var_base.py
@@ -1361,4 +1361,5 @@ class TestVarBaseCopyGradientFrom(unittest.TestCase):


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 15d5640b11f..59e285c1200 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -68,8 +68,9 @@ def to_list(value):


 def to_numpy(var):
-    assert isinstance(var, (Variable, fluid.core.VarBase)), "not a variable"
-    if isinstance(var, fluid.core.VarBase):
+    assert isinstance(var, (Variable, fluid.core.VarBase,
+                            fluid.core.eager.Tensor)), "not a variable"
+    if isinstance(var, (fluid.core.VarBase, fluid.core.eager.Tensor)):
         return var.numpy()
     t = global_scope().find_var(var.name).get_tensor()
     return np.array(t)
diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py
index d75c95b4372..ef62aa264fb 100644
--- a/python/paddle/metric/metrics.py
+++ b/python/paddle/metric/metrics.py
@@ -282,7 +282,7 @@ class Accuracy(Metric):
         Return:
             Tensor: the accuracy of current step.
         """
-        if isinstance(correct, paddle.Tensor):
+        if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             correct = correct.numpy()
         num_samples = np.prod(np.array(correct.shape[:-1]))
         accs = []
@@ -410,12 +410,12 @@ class Precision(Metric):
                 the shape should keep the same as preds.
                 The data type is 'int32' or 'int64'.
         """
-        if isinstance(preds, paddle.Tensor):
+        if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             preds = preds.numpy()
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

-        if isinstance(labels, paddle.Tensor):
+        if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             labels = labels.numpy()
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
@@ -543,12 +543,12 @@ class Recall(Metric):
                 the shape should keep the same as preds.
                 Shape: [batch_size, 1], Dtype: 'int32' or 'int64'.
         """
-        if isinstance(preds, paddle.Tensor):
+        if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             preds = preds.numpy()
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

-        if isinstance(labels, paddle.Tensor):
+        if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             labels = labels.numpy()
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
@@ -698,12 +698,12 @@ class Auc(Metric):
                 (batch_size, 1), labels[i] is either o or 1,
                 representing the label of the instance i.
""" - if isinstance(labels, paddle.Tensor): + if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): labels = labels.numpy() elif not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray or Tensor.") - if isinstance(preds, paddle.Tensor): + if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): preds = preds.numpy() elif not _is_numpy_(preds): raise ValueError("The 'preds' must be a numpy ndarray or Tensor.") diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index ce2877f9c39..aa2d2e16118 100755 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -462,7 +462,7 @@ def is_tensor(x): print(check) #False """ - return isinstance(x, Tensor) + return isinstance(x, (Tensor, paddle.fluid.core.eager.Tensor)) def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True): diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 9a9383cee42..e530bfd8536 100755 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -263,6 +263,9 @@ def fill_diagonal_tensor(x, y, offset=0, dim1=0, dim2=1, name=None): setattr(core.VarBase, 'fill_diagonal_tensor', fill_diagonal_tensor) +if core._in_eager_mode(): + setattr(core.eager.Tensor, 'fill_diagonal_tensor', fill_diagonal_tensor) + @dygraph_only def tolist(x): diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index c6fafd25014..ced2113733c 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -1335,7 +1335,7 @@ def renorm(x, p, axis, max_norm): raise ValueError("the axis:{} should not be less than -1 * length of input_shape:{}".format(axis,-1 * len(input_shape))) axis = axis + len(input_shape) if paddle.in_dynamic_mode(): - out = core.ops.renorm(x, 'p',p, 'axis',axis, 'max_norm', max_norm) + out = _C_ops.renorm(x, 'p',p, 'axis',axis, 'max_norm', max_norm) return out inputs = {'X': x} -- GitLab