diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index b8d59e8dd8b4c60e28323955effd232eb2b51945..df2cdc35626a8aa27899f7340fa14285299a11d1 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -2032,7 +2032,15 @@ static std::string GenerateSingleOpBase(
   const char* ATTRS_TEMPLATE = "  auto& %s = this->attr_map_;\n";
   std::string grad_attrs_str =
       paddle::string::Sprintf(ATTRS_TEMPLATE, attrs_name);
-
+  if (fwd_op_type == "cast") {
+    // switch in/out dtype
+    const char* CAST_GRAD =
+        "  auto temp_type = %s[\"in_dtype\"];\n"
+        "  %s[\"in_dtype\"] = %s[\"out_dtype\"];\n"
+        "  %s[\"out_dtype\"] = temp_type;\n";
+    grad_attrs_str += paddle::string::Sprintf(CAST_GRAD, attrs_name, attrs_name,
+                                              attrs_name, attrs_name);
+  }
   // Handle dynamic grad attributes
   grad_attrs_str += HandleDynamicGradAttributes(fwd_op_type, attrs_name);
   generated_grad_function_body += grad_attrs_str;
diff --git a/paddle/fluid/eager/grad_tensor_holder.cc b/paddle/fluid/eager/grad_tensor_holder.cc
index 163d25e85ce8c085087331c6e3273075aed5e5f4..038ad09aa4d8bef1282c024559b60d0eed7e48d1 100644
--- a/paddle/fluid/eager/grad_tensor_holder.cc
+++ b/paddle/fluid/eager/grad_tensor_holder.cc
@@ -93,7 +93,7 @@ void GradTensorHolder::add(size_t slot_id, size_t rank,
       // Create new tensor->impl and fill it with 1.0
       if (t.defined()) {
         // Fill 1.0
-        buffer_[slot_id][rank] = paddle::experimental::ones_like(t);
+        buffer_[slot_id][rank] = paddle::experimental::ones_like(t, t.dtype());
       }
     }
   }
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 355b4076a0bedfd2021a2e18e6c339c17956a768..6817fa4bf04b9fa7e0b95ebcd5fef062fc72d7c6 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -62,6 +62,8 @@ int TensorDtype2NumpyDtype(phi::DataType dtype) {
       return pybind11::detail::npy_api::NPY_INT32_;
     case phi::DataType::INT64:
       return pybind11::detail::npy_api::NPY_INT64_;
+    case phi::DataType::BFLOAT16:
+      return pybind11::detail::NPY_UINT16_;
     case phi::DataType::FLOAT16:
       return pybind11::detail::npy_api::NPY_FLOAT16_;
     case phi::DataType::FLOAT32:
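Note: the eager_generator.cc hunk above swaps cast's in_dtype/out_dtype attributes when emitting the generated grad node. At the Python level this corresponds to the fact that the gradient of a cast is simply a cast back to the input dtype. A minimal dygraph sketch (illustrative only, not generated code):

import paddle

x = paddle.ones([2, 3], dtype='float32')
x.stop_gradient = False
y = paddle.cast(x, 'float16')  # forward: in_dtype=float32, out_dtype=float16
y.sum().backward()             # backward runs cast with the two attrs swapped
print(x.grad.dtype)            # expected to match the input dtype, float32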
diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/fluid/dygraph/base.py
index 9439982858530e1e81156be4b32ef2d91dc4a33a..b4c5a36d288b7ee0f6e771d72b21bd54d1e3d669 100644
--- a/python/paddle/fluid/dygraph/base.py
+++ b/python/paddle/fluid/dygraph/base.py
@@ -612,7 +612,7 @@ def grad(outputs,
     if no_grad_vars is None:
         no_grad_vars = []
-    elif isinstance(no_grad_vars, core.VarBase):
+    elif isinstance(no_grad_vars, (core.VarBase, core.eager.Tensor)):
         no_grad_vars = [no_grad_vars]
     elif isinstance(no_grad_vars, core.eager.Tensor):
         no_grad_vars = [no_grad_vars]
@@ -718,13 +718,13 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
             y.shape     # [3L, 2L]
 
     """
-    support_type = (list, tuple, np.ndarray, core.VarBase, framework.Variable,
-                    core.Tensor, core.LoDTensor)
+    support_type = (list, tuple, np.ndarray, core.eager.Tensor, core.VarBase,
+                    framework.Variable, core.Tensor, core.LoDTensor)
     if not isinstance(value, support_type):
         raise TypeError(
             "The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s."
             % (support_type, type(value)))
-    if isinstance(value, (core.VarBase, framework.Variable)):
+    if isinstance(value, (core.eager.Tensor, core.VarBase, framework.Variable)):
         return value
     elif isinstance(value, (core.Tensor, core.LoDTensor)):
         return core.VarBase(value)
diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index 2b67a2029727f6b8f917239094a1b906d5cd6a62..af30b2b2444b44f1b27e8f277eb380557255517d 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -28,6 +28,7 @@ from .math_op_patch import monkey_patch_math_varbase
 from .parallel import scale_loss
 from paddle.fluid.data_feeder import convert_dtype, _PADDLE_DTYPE_2_NUMPY_DTYPE
 import paddle.utils.deprecated as deprecated
+from paddle import _C_ops
 
 
 class TensorHookRemoveHelper(object):
@@ -782,7 +783,7 @@ def monkey_patch_varbase():
 
     @framework.dygraph_only
     def clone(self):
-        return _C_ops_.assign(self)
+        return _C_ops.assign(self)
 
     @framework.dygraph_only
     def value(self):
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index d0a94238a7aeb21f9d1baf8154cbe3b7f2b77a72..fb787215d910e9924622147b86c328af5e1994de 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -316,7 +316,8 @@ def _dygraph_not_support_(func):
 
 def _dygraph_only_(func):
     def __impl__(*args, **kwargs):
-        assert in_dygraph_mode(
+        assert (
+            in_dygraph_mode() or _in_eager_mode()
         ), "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." % func.__name__
         return func(*args, **kwargs)
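Note: the dygraph/base.py hunk above lets a single tensor from either binding be passed as no_grad_vars; it is normalized into a one-element list. A small usage sketch of paddle.grad relying on that behaviour (assumes the default dynamic-graph mode):

import paddle

x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
w = paddle.to_tensor([3.0, 4.0], stop_gradient=False)
y = (x * w).sum()

# no_grad_vars may be a single Tensor; it is wrapped into [no_grad_vars].
dx, = paddle.grad(outputs=y, inputs=x, no_grad_vars=w)
print(dx)  # gradient w.r.t. x only; w is excluded from gradient computation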
diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py
index 9348b0b50a1c08e7103dc3cc32169f4a6a40591c..c45045509201df89d6a07b8c0aadc7ef9130cf2f 100644
--- a/python/paddle/fluid/tests/test_detection.py
+++ b/python/paddle/fluid/tests/test_detection.py
@@ -886,6 +886,7 @@ class TestDistributeFpnProposals(LayerTest):
                 refer_level=4,
                 refer_scale=224,
                 rois_num=rois_num_dy)
+            print(type(multi_rois_dy))
             output_dy = multi_rois_dy + [restore_ind_dy] + rois_num_per_level_dy
             output_dy_np = []
             for output in output_dy:
@@ -973,4 +974,5 @@ class TestBoxDecoderAndAssign(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py
index cc2e14d6d6c2ef237351e372c75ca7e700de3fbf..341ec852c52197f689870f0a6c45141ebe318301 100644
--- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py
+++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py
@@ -50,3 +50,7 @@ class TestExponentialFamilyException(unittest.TestCase):
     def test_entropy_exception(self):
         with self.assertRaises(NotImplementedError):
             paddle.distribution.ExponentialFamily.entropy(self.dist)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/distribution/test_kl.py b/python/paddle/fluid/tests/unittests/distribution/test_kl.py
index a1413722446e287688d7e120a3ef31ea67cc798b..55358380c8b23fdfd512b259aca06901d5623e38 100644
--- a/python/paddle/fluid/tests/unittests/distribution/test_kl.py
+++ b/python/paddle/fluid/tests/unittests/distribution/test_kl.py
@@ -112,3 +112,7 @@ class TestKLExpfamilyExpFamily(unittest.TestCase):
             kl._kl_expfamily_expfamily(self.p, self.q),
             rtol=config.RTOL.get(config.DEFAULT_DTYPE),
             atol=config.ATOL.get(config.DEFAULT_DTYPE))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py
index 737c085dde6acf5e3645b2127f42b1d8b5a7aa1d..34b6f6dc8e5453b42e10c45d5423c6e17d2d0506 100644
--- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py
+++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py
@@ -20,6 +20,7 @@ import sys
 sys.path.append("../")
 from op_test import OpTest
 
+import paddle
 from paddle import fluid
 
 
@@ -115,4 +116,5 @@ class TestSequenceConcatOpError(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_Tensor_type.py b/python/paddle/fluid/tests/unittests/test_Tensor_type.py
index 59395b94279ea7ec4fe43221deede7e82be8f38e..f1427d29782b969d9571f79c9a7bc62bf4e77070 100644
--- a/python/paddle/fluid/tests/unittests/test_Tensor_type.py
+++ b/python/paddle/fluid/tests/unittests/test_Tensor_type.py
@@ -39,6 +39,7 @@ class TensorTypeTest(unittest.TestCase):
             tensorx = paddle.tensor.logic.Tensor(inx)
             typex_str = str(type(tensorx))
+            expectx = ""
             self.assertEqual((typex_str == expectx), True)
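Note: the test_Tensor_type.py tweak above exists because the concrete tensor class differs between the legacy VarBase binding and the new eager Tensor binding, so comparing against a hard-coded type string is brittle. A hedged sketch of a more robust check using only public APIs:

import numpy as np
import paddle

x = paddle.to_tensor(np.array([1, 2]))
# str(type(x)) depends on which C++ binding backs the tensor, so prefer
# paddle.is_tensor over comparing against an exact class string.
print(paddle.is_tensor(x))  # True
print(str(type(x)))         # binding-dependent string representation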
diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py
index ecac22553cbcda7cc2dae179603f407eddc8652a..d05c9a3c313bb634effd9280e3d9503142166ee4 100644
--- a/python/paddle/fluid/tests/unittests/test_adam_op.py
+++ b/python/paddle/fluid/tests/unittests/test_adam_op.py
@@ -1202,4 +1202,5 @@ class TestMultiTensorAdam(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_base_layer.py b/python/paddle/fluid/tests/unittests/test_base_layer.py
index b440e745b1082e98a832ea076cc052cbc106eeab..789cfa82658f43d2adb148fe41fd2fb380e96fba 100644
--- a/python/paddle/fluid/tests/unittests/test_base_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_base_layer.py
@@ -451,4 +451,5 @@ class TestLayerTo(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_create_parameter.py b/python/paddle/fluid/tests/unittests/test_create_parameter.py
index 763fb64816c9c66055b3ead2886e4ba29e0406f7..199558acd4ef64f4d63c04920ba0b0e0295df96c 100644
--- a/python/paddle/fluid/tests/unittests/test_create_parameter.py
+++ b/python/paddle/fluid/tests/unittests/test_create_parameter.py
@@ -18,6 +18,7 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 from paddle.fluid import ParamAttr, initializer
+import paddle
 
 
 class TestCreateParameterError(unittest.TestCase):
@@ -50,4 +51,5 @@ class TestCreateParameterError(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_ctc_align.py b/python/paddle/fluid/tests/unittests/test_ctc_align.py
index f5934debfd7b663b24a0949012ea2aa85e07ece8..ffc5bc184efc222d3adb57e158814c0f592b9405 100644
--- a/python/paddle/fluid/tests/unittests/test_ctc_align.py
+++ b/python/paddle/fluid/tests/unittests/test_ctc_align.py
@@ -20,6 +20,7 @@ import numpy as np
 from op_test import OpTest
 from test_softmax_op import stable_softmax
 import paddle.fluid as fluid
+import paddle
 
 
 def CTCAlign(input, lod, blank, merge_repeated, padding=0, input_length=None):
@@ -229,4 +230,5 @@ class BadInputTestCTCAlignr(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_diff_op.py b/python/paddle/fluid/tests/unittests/test_diff_op.py
index 1ae780f488d2dc6bf37f88505a67723ea867dd94..4a96827bd7c3c56320a58261abe1824786164d10 100644
--- a/python/paddle/fluid/tests/unittests/test_diff_op.py
+++ b/python/paddle/fluid/tests/unittests/test_diff_op.py
@@ -211,4 +211,5 @@ class TestDiffOpPreAppendAxis(TestDiffOp):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py b/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py
index f95546f15f0024ccd8b7cd8464f0a8eb70662d8d..27d82fcc8903be20a378a45e0f4f3b01aa3d3bb7 100644
--- a/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py
+++ b/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py
@@ -190,4 +190,5 @@ class TestDygraphRemoveWeightNorm(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_exponential_op.py b/python/paddle/fluid/tests/unittests/test_exponential_op.py
index ccbc0a1676302b4c29b524601930cc855847e0fc..7a3ae203be62d644f076ae9b6bc2bf5b8641ccdf 100644
--- a/python/paddle/fluid/tests/unittests/test_exponential_op.py
+++ b/python/paddle/fluid/tests/unittests/test_exponential_op.py
@@ -209,4 +209,5 @@ class TestExponentialAPI(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_fmin_op.py b/python/paddle/fluid/tests/unittests/test_fmin_op.py
index 5cdf096be6708c47dd1f56dc97243be70c6d63d5..7231823c375324aa7bbf7d45db14b4457ca4a8dd 100644
--- a/python/paddle/fluid/tests/unittests/test_fmin_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fmin_op.py
@@ -189,3 +189,8 @@ class TestElementwiseFmin2Op(OpTest):
         """test_check_grad_ingore_y"""
         self.check_grad(
             ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
+
+
+if __name__ == "__main__":
+    paddle.enable_static()
+    unittest.main()
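Note: most of the test files in this patch gain the same two-line change, shown in isolation below. Paddle 2.x starts in dynamic-graph mode by default (and these suites are also exercised under the new eager mode), so OpTest-style suites that build static programs switch back explicitly before unittest.main(). The test class here is a hypothetical placeholder, not one of the real suites.

import unittest

import paddle


class ExamplePlaceholderTest(unittest.TestCase):
    def test_static_mode_is_active(self):
        # enable_static() is called in the __main__ guard below.
        self.assertFalse(paddle.in_dynamic_mode())


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()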
diff --git a/python/paddle/fluid/tests/unittests/test_initializer.py b/python/paddle/fluid/tests/unittests/test_initializer.py
index bff10c9c4ca26d342a6849a0b23a490058d6b7f7..8dc822c69b2c5df34968fbcd39b8d8438700add2 100644
--- a/python/paddle/fluid/tests/unittests/test_initializer.py
+++ b/python/paddle/fluid/tests/unittests/test_initializer.py
@@ -1025,4 +1025,5 @@ class TestDiracInitializer3(TestDiracInitializer1):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_inner.py b/python/paddle/fluid/tests/unittests/test_inner.py
index de9decd0b8961115b7ee2e6dac44bfb40fcc5c1f..ff9f15ebbfc8204de042d7731ed94035152f46eb 100644
--- a/python/paddle/fluid/tests/unittests/test_inner.py
+++ b/python/paddle/fluid/tests/unittests/test_inner.py
@@ -163,4 +163,5 @@ class TestMultiplyError(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_io_save_load.py b/python/paddle/fluid/tests/unittests/test_io_save_load.py
index 89ca28510b9b929b1fe36e0c9883da020e71555c..83aadbf68d569f904d56abfcab91236bd637095b 100644
--- a/python/paddle/fluid/tests/unittests/test_io_save_load.py
+++ b/python/paddle/fluid/tests/unittests/test_io_save_load.py
@@ -88,4 +88,5 @@ class TestWhenTrainWithNoGrad(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_isclose_op.py b/python/paddle/fluid/tests/unittests/test_isclose_op.py
index aa39284d11349eed027a1a496ce6d8b2b5e92e3d..2bb58d7c5741f2655bdcbffecedf8762704c07f3 100644
--- a/python/paddle/fluid/tests/unittests/test_isclose_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isclose_op.py
@@ -210,6 +210,9 @@ class TestIscloseOpFloat64(TestIscloseOp):
         self.atol = np.array([0]).astype("float64")
         self.equal_nan = False
 
+    def test_check_output(self):
+        self.check_output()
+
 
 class TestIscloseOpLargeDimInput(TestIscloseOp):
     def set_args(self):
@@ -222,4 +225,5 @@ class TestIscloseOpLargeDimInput(TestIscloseOp):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_log_softmax.py b/python/paddle/fluid/tests/unittests/test_log_softmax.py
index 16f954708d4d4149f46a18cfd48e35dfbe147153..423eeaf3ada45e7d04dca3512bdba0b067583222 100644
--- a/python/paddle/fluid/tests/unittests/test_log_softmax.py
+++ b/python/paddle/fluid/tests/unittests/test_log_softmax.py
@@ -175,4 +175,5 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_lr_scheduler.py b/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
index 6d94144fc7788d0dc79cfb10f97667a257621a04..60dd4948f996e505f59d7e12b92569000843c528 100644
--- a/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
+++ b/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
@@ -555,4 +555,5 @@ class TestLRScheduler(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_mean_iou.py b/python/paddle/fluid/tests/unittests/test_mean_iou.py
index e2e118ac9e3b46499055c2dd46755d5401d5abd5..4e89a9034a341777f09958d9709b64a12020ec28 100644
--- a/python/paddle/fluid/tests/unittests/test_mean_iou.py
+++ b/python/paddle/fluid/tests/unittests/test_mean_iou.py
@@ -19,6 +19,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
+import paddle
 
 
 def compute_mean_iou(predictions, labels, num_classes, in_wrongs, in_corrects,
@@ -129,4 +130,5 @@ class TestMeanIOUOpError(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_minus_op.py b/python/paddle/fluid/tests/unittests/test_minus_op.py
index 54253b17b967871b03628023c5a9fdb339af1828..461ff6a9273cdb39c73901da3f77fca021335f0c 100644
--- a/python/paddle/fluid/tests/unittests/test_minus_op.py
+++ b/python/paddle/fluid/tests/unittests/test_minus_op.py
@@ -17,6 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle
 
 
 class TestMinusOp(OpTest):
@@ -36,4 +37,5 @@ class TestMinusOp(OpTest):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_norm_all.py b/python/paddle/fluid/tests/unittests/test_norm_all.py
index 575bc653618a583e883783cd1fffe1db371eccff..ef912699455d1b4ea2aa2899f20d0e2e09634f77 100644
--- a/python/paddle/fluid/tests/unittests/test_norm_all.py
+++ b/python/paddle/fluid/tests/unittests/test_norm_all.py
@@ -588,4 +588,5 @@ class API_NormTest(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
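Note: the test_check_output added for TestIscloseOpFloat64 above simply re-runs the standard OpTest output check for the float64 case. At the public API level the behaviour being covered looks like this sketch (tolerance semantics follow numpy.isclose):

import paddle

x = paddle.to_tensor([10000.0, 1e-07], dtype='float64')
y = paddle.to_tensor([10000.1, 1e-08], dtype='float64')
# A pair counts as close when |x - y| <= atol + rtol * |y|.
print(paddle.isclose(x, y, rtol=1e-05, atol=1e-08))  # expected: [True, False]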
diff --git a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py
index 9e0cf6ddef2d619e4d3b32260f7ddf5f31186ae5..8945d35c131fd8de89e2a421bbbd4b16aa01c9d8 100644
--- a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py
+++ b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py
@@ -315,7 +315,9 @@ class TestSaveLoadAny(unittest.TestCase):
             paddle.save(tensor, path)
             t_dygraph = paddle.load(path)
             np_dygraph = paddle.load(path, return_numpy=True)
-            self.assertTrue(isinstance(t_dygraph, paddle.fluid.core.VarBase))
+            self.assertTrue(
+                isinstance(t_dygraph, (paddle.fluid.core.VarBase,
+                                       paddle.fluid.core.eager.Tensor)))
             self.assertTrue(np.array_equal(tensor.numpy(), np_dygraph))
             self.assertTrue(np.array_equal(tensor.numpy(), t_dygraph.numpy()))
             paddle.enable_static()
@@ -685,27 +687,34 @@ class TestSaveLoadAny(unittest.TestCase):
                         np.array(v), np.array(load_tensor2['k2'][k])))
         self.assertTrue(load_tensor2['epoch'] == 123)
 
-        self.assertTrue(isinstance(load_tensor3[0], fluid.core.VarBase))
+        self.assertTrue(
+            isinstance(load_tensor3[0], (fluid.core.VarBase,
+                                         fluid.core.eager.Tensor)))
         self.assertTrue(np.array_equal(load_tensor3[0].numpy(), obj3[0]))
-        self.assertTrue(isinstance(load_tensor3[1], fluid.core.VarBase))
+        self.assertTrue(
+            isinstance(load_tensor3[1], (fluid.core.VarBase,
+                                         fluid.core.eager.Tensor)))
         self.assertTrue(np.array_equal(load_tensor3[1].numpy(), obj3[1]))
 
         for k, v in state_dict.items():
             self.assertTrue(
-                isinstance(load_tensor3[2]["state_dict"][k],
-                           fluid.core.VarBase))
+                isinstance(load_tensor3[2]["state_dict"][k], (
+                    fluid.core.VarBase, fluid.core.eager.Tensor)))
             self.assertTrue(
                 np.array_equal(load_tensor3[2]["state_dict"][k].numpy(),
                                np.array(v)))
 
         for k, v in state_dict.items():
             self.assertTrue(
-                isinstance(load_tensor3[2]["opt"][k], fluid.core.VarBase))
+                isinstance(load_tensor3[2]["opt"][k], (
+                    fluid.core.VarBase, fluid.core.eager.Tensor)))
             self.assertTrue(
                 np.array_equal(load_tensor3[2]["opt"][k].numpy(), np.array(v)))
 
-        self.assertTrue(isinstance(load_tensor4[0], fluid.core.VarBase))
+        self.assertTrue(
+            isinstance(load_tensor4[0], (fluid.core.VarBase,
+                                         fluid.core.eager.Tensor)))
         self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0]))
 
         load_array1 = paddle.load(path1, return_numpy=True)
diff --git a/python/paddle/fluid/tests/unittests/test_renorm_op.py b/python/paddle/fluid/tests/unittests/test_renorm_op.py
index 3ea2002a9786fdd3f6c034e84176d0cae46ca591..e00a892cf7197bc94d85e9082651e26a4bb3bbb9 100644
--- a/python/paddle/fluid/tests/unittests/test_renorm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_renorm_op.py
@@ -54,7 +54,7 @@ class TestRenormAPI(unittest.TestCase):
     def test_dygraph_api(self):
         self.input_data()
         # case axis none
-        with fluid.dygraph.guard():
+        with fluid.dygraph.guard(fluid.CPUPlace()):
             input = [[[2.0, 2, -2], [3, 0.3, 3]], [[2, -8, 2], [3.1, 3.7, 3]]]
             x = paddle.to_tensor(input, stop_gradient=False)
             y = paddle.renorm(x, 1.0, 2, 2.05)
@@ -94,4 +94,5 @@ class TestRenormAPI(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
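Note: the test_paddle_save_load.py assertions above were relaxed because paddle.load may hand back either tensor binding in dynamic mode. The round trip they exercise, in isolation (the temporary path is only for illustration):

import os
import tempfile

import numpy as np
import paddle

tensor = paddle.to_tensor(np.random.rand(3, 4).astype('float32'))
path = os.path.join(tempfile.mkdtemp(), 'tensor.pdtensor')

paddle.save(tensor, path)
t_dygraph = paddle.load(path)                       # a Tensor object
np_dygraph = paddle.load(path, return_numpy=True)   # a plain numpy.ndarray
assert np.array_equal(tensor.numpy(), t_dygraph.numpy())
assert np.array_equal(tensor.numpy(), np_dygraph)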
diff --git a/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py b/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py
index ca324b4a8fd0581e7483c12321f54acaa1965f54..1bfc1b00aa8227e6ccaefcaf1044774ed1404f45 100644
--- a/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py
+++ b/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py
@@ -23,6 +23,7 @@ from test_multiclass_nms_op import iou
 from test_multiclass_nms_op import nms
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+import paddle
 
 
 def multiclass_nms(prediction, class_num, keep_top_k, nms_threshold):
@@ -518,4 +519,5 @@ class TestRetinanetDetectionOutOpError(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py
index 9a97f57aaae5f290b20e34242b1b43e5e352223d..74409c8671059673121d0a73ed85d2cad8e3d6f2 100644
--- a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py
@@ -178,4 +178,5 @@ class SmoothL1Loss(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_tile_op.py b/python/paddle/fluid/tests/unittests/test_tile_op.py
index b0f065a26a006ee3553a84938fb5b6b2db7b3172..8359141f309f523d73ddb4375b7778828ab15490 100644
--- a/python/paddle/fluid/tests/unittests/test_tile_op.py
+++ b/python/paddle/fluid/tests/unittests/test_tile_op.py
@@ -22,7 +22,7 @@ import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
 
 
-# Situation 1: repeat_times is a list (without tensor)
+#Situation 1: repeat_times is a list (without tensor)
 class TestTileOpRank1(OpTest):
     def setUp(self):
         self.op_type = "tile"
@@ -248,4 +248,5 @@ class TestTileAPI(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py
index dbd40c349bbc81d39b8a929ee5b3e7b81a083406..57a7f94bedce9fb3cd9981e6ae21f6d902fd04d9 100644
--- a/python/paddle/fluid/tests/unittests/test_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_var_base.py
@@ -1361,4 +1361,5 @@ class TestVarBaseCopyGradientFrom(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 15d5640b11fe501e0d9f83168c434f9f02d7877c..59e285c1200b88cadd2016421b1a8de70c7dad34 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -68,8 +68,9 @@ def to_list(value):
 
 
 def to_numpy(var):
-    assert isinstance(var, (Variable, fluid.core.VarBase)), "not a variable"
-    if isinstance(var, fluid.core.VarBase):
+    assert isinstance(var, (Variable, fluid.core.VarBase,
+                            fluid.core.eager.Tensor)), "not a variable"
+    if isinstance(var, (fluid.core.VarBase, fluid.core.eager.Tensor)):
         return var.numpy()
     t = global_scope().find_var(var.name).get_tensor()
     return np.array(t)
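Note: the hapi/model.py hunk above and the metric updates that follow repeat one pattern: accept a dygraph tensor from either binding and fall back to numpy. A hedged standalone sketch of that pattern (to_numpy_compat is a hypothetical name, not a Paddle API):

import numpy as np
import paddle


def to_numpy_compat(value):
    # Accept a dygraph tensor from either binding, or a plain numpy array.
    if paddle.is_tensor(value):
        return value.numpy()
    if isinstance(value, np.ndarray):
        return value
    raise TypeError(
        "expected a paddle Tensor or numpy.ndarray, got %s" % type(value))


print(to_numpy_compat(paddle.ones([2, 2])))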
""" - if isinstance(preds, paddle.Tensor): + if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): preds = preds.numpy() elif not _is_numpy_(preds): raise ValueError("The 'preds' must be a numpy ndarray or Tensor.") - if isinstance(labels, paddle.Tensor): + if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): labels = labels.numpy() elif not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray or Tensor.") @@ -543,12 +543,12 @@ class Recall(Metric): the shape should keep the same as preds. Shape: [batch_size, 1], Dtype: 'int32' or 'int64'. """ - if isinstance(preds, paddle.Tensor): + if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): preds = preds.numpy() elif not _is_numpy_(preds): raise ValueError("The 'preds' must be a numpy ndarray or Tensor.") - if isinstance(labels, paddle.Tensor): + if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): labels = labels.numpy() elif not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray or Tensor.") @@ -698,12 +698,12 @@ class Auc(Metric): (batch_size, 1), labels[i] is either o or 1, representing the label of the instance i. """ - if isinstance(labels, paddle.Tensor): + if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): labels = labels.numpy() elif not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray or Tensor.") - if isinstance(preds, paddle.Tensor): + if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): preds = preds.numpy() elif not _is_numpy_(preds): raise ValueError("The 'preds' must be a numpy ndarray or Tensor.") diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index ce2877f9c39bdf48feb7c0614a5dbc01c9ac8235..aa2d2e161181b93d24bae1c74d120143ebd0046c 100755 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -462,7 +462,7 @@ def is_tensor(x): print(check) #False """ - return isinstance(x, Tensor) + return isinstance(x, (Tensor, paddle.fluid.core.eager.Tensor)) def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True): diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 9a9383cee420248deb22193876b8e3db70a0f8d2..e530bfd8536a42ef84bde0a9fb8c15fe47241fc5 100755 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -263,6 +263,9 @@ def fill_diagonal_tensor(x, y, offset=0, dim1=0, dim2=1, name=None): setattr(core.VarBase, 'fill_diagonal_tensor', fill_diagonal_tensor) +if core._in_eager_mode(): + setattr(core.eager.Tensor, 'fill_diagonal_tensor', fill_diagonal_tensor) + @dygraph_only def tolist(x): diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index c6fafd25014c2f44aa0e8228bfaf0a4decf138a7..ced2113733c02ad924d7a7e0be5b357a35447197 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -1335,7 +1335,7 @@ def renorm(x, p, axis, max_norm): raise ValueError("the axis:{} should not be less than -1 * length of input_shape:{}".format(axis,-1 * len(input_shape))) axis = axis + len(input_shape) if paddle.in_dynamic_mode(): - out = core.ops.renorm(x, 'p',p, 'axis',axis, 'max_norm', max_norm) + out = _C_ops.renorm(x, 'p',p, 'axis',axis, 'max_norm', max_norm) return out inputs = {'X': x}