From b543998f1e073c12d1d0fda0c76f5b4972816ee4 Mon Sep 17 00:00:00 2001
From: Weilong Wu
Date: Sun, 24 Apr 2022 10:15:54 +0800
Subject: [PATCH] [Cherry-pick, Eager] Fix CastPyArg2scalar for max value of
 int64 (#42098) (#42129)

* [Eager] Fix CastPyArg2scalar for max value of int64 (#42098)

* [Eager] Fix CastPyArg2Scalar in Long case

* Add more test cases for paddle.clip

* Use PyLong_AsLongLong

* Fix merge conflicts
---
 paddle/fluid/pybind/eager_utils.cc        | 19 +++++++-------
 paddle/fluid/pybind/op_function_common.cc |  2 +-
 .../fluid/tests/unittests/test_clip_op.py | 26 ++++++++++++++++---
 3 files changed, 34 insertions(+), 13 deletions(-)
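Note (editor's illustration, not part of the original commit): the sketch
below mirrors the new test_clip_dygraph_default_max case and shows the
failure mode this patch targets. It assumes paddle.clip substitutes a
default `max` near np.iinfo(np.int64).max when `max` is omitted for
integer tensors; before this patch that Python int was narrowed to a
32-bit `int` in CastPyArg2Scalar, and CastPyArg2Long used PyLong_AsLong,
which is only 32 bits wide on some platforms, so the default bound
overflowed.

    import numpy as np
    import paddle

    x = paddle.to_tensor([1, 2, 3], dtype="int64")
    # Omitting `max` sends an int64-sized default through the Python
    # int -> paddle::experimental::Scalar conversion; the result must
    # be unchanged rather than clipped at a truncated bound.
    out = paddle.clip(x, min=1)
    assert (out.numpy() == np.array([1, 2, 3])).all()
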
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index c0167a19e2b..b0e62cdc512 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -1054,23 +1054,24 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
   PyTypeObject* type = obj->ob_type;
   auto type_name = std::string(type->tp_name);
   VLOG(1) << "type_name: " << type_name;
-  if (type_name == "int") {
-    int value = CastPyArg2Int(obj, op_type, arg_pos);
+  if (PyBool_Check(obj)) {
+    bool value = CastPyArg2Boolean(obj, op_type, arg_pos);
     return paddle::experimental::Scalar(value);
-  } else if (type_name == "float") {
-    float value = CastPyArg2Float(obj, op_type, arg_pos);
+  } else if (PyLong_Check(obj)) {
+    int64_t value = CastPyArg2Long(obj, op_type, arg_pos);
     return paddle::experimental::Scalar(value);
-
-  } else if (type_name == "bool") {
-    bool value = CastPyArg2Boolean(obj, op_type, arg_pos);
+  } else if (PyFloat_Check(obj)) {
+    float value = CastPyArg2Float(obj, op_type, arg_pos);
     return paddle::experimental::Scalar(value);
-
-  } else if (type_name == "Tensor") {
+  } else if (IsEagerTensor(obj)) {
     paddle::experimental::Tensor& value = GetTensorFromPyObject(
         op_type, "" /*arg_name*/, obj, arg_pos, false /*dispensable*/);
     return paddle::experimental::Scalar(value);
   } else if (type_name.find("numpy") != std::string::npos) {
     return CastNumpy2Scalar(obj, op_type, arg_pos);
+  } else if (PyObject_CheckLongOrToLong(&obj)) {
+    int value = CastPyArg2Int(obj, op_type, arg_pos);
+    return paddle::experimental::Scalar(value);
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "%s(): argument (position %d) must be "
diff --git a/paddle/fluid/pybind/op_function_common.cc b/paddle/fluid/pybind/op_function_common.cc
index 50e0daf8508..5eed63d0800 100644
--- a/paddle/fluid/pybind/op_function_common.cc
+++ b/paddle/fluid/pybind/op_function_common.cc
@@ -153,7 +153,7 @@ void CastPyArg2AttrInt(PyObject* obj,
 int64_t CastPyArg2Long(PyObject* obj, const std::string& op_type,
                        ssize_t arg_pos) {
   if (PyObject_CheckLongOrToLong(&obj)) {
-    return (int64_t)PyLong_AsLong(obj);  // NOLINT
+    return (int64_t)PyLong_AsLongLong(obj);  // NOLINT
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "%s(): argument (position %d) must be "
diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py
index 37b1cfd02fa..121b91d7415 100644
--- a/python/paddle/fluid/tests/unittests/test_clip_op.py
+++ b/python/paddle/fluid/tests/unittests/test_clip_op.py
@@ -200,7 +200,7 @@ class TestClipAPI(unittest.TestCase):
             np.allclose(res11, (data * 10).astype(np.int64).clip(2, 8)))
         paddle.disable_static()
 
-    def test_clip_dygraph(self):
+    def func_clip_dygraph(self):
         paddle.disable_static()
         place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
         ) else fluid.CPUPlace()
@@ -233,9 +233,29 @@ class TestClipAPI(unittest.TestCase):
             np.allclose(out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8)))
         self.assertTrue(np.allclose(out_6.numpy(), data.clip(0.2, 0.8)))
 
-    def test_eager(self):
+    def test_clip_dygraph(self):
+        with _test_eager_guard():
+            self.func_clip_dygraph()
+        self.func_clip_dygraph()
+
+    def test_clip_dygraph_default_max(self):
+        paddle.disable_static()
         with _test_eager_guard():
-            self.test_clip_dygraph()
+            x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
+            x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
+            x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
+            egr_out1 = paddle.clip(x_int32, min=1)
+            egr_out2 = paddle.clip(x_int64, min=1)
+            egr_out3 = paddle.clip(x_f32, min=1)
+        x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
+        x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
+        x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
+        out1 = paddle.clip(x_int32, min=1)
+        out2 = paddle.clip(x_int64, min=1)
+        out3 = paddle.clip(x_f32, min=1)
+        self.assertTrue(np.allclose(out1.numpy(), egr_out1.numpy()))
+        self.assertTrue(np.allclose(out2.numpy(), egr_out2.numpy()))
+        self.assertTrue(np.allclose(out3.numpy(), egr_out3.numpy()))
 
     def test_errors(self):
         paddle.enable_static()
--
GitLab