From e4cb897e983fd45efd34e776b5b7ed862be117c4 Mon Sep 17 00:00:00 2001
From: Aurelius84
Date: Thu, 21 Apr 2022 10:52:49 +0800
Subject: [PATCH] [Eager]Fix full_like/clip with np.generic type as attribute
 (#41808) (#41974)

* [Eager]Fix full_like/clip with np.generic type as attribute

* support numpy generic

* remove useless code
---
 paddle/fluid/pybind/eager_utils.cc        | 30 ++++++++++++++++++-
 paddle/fluid/pybind/eager_utils.h         |  4 +++
 paddle/fluid/pybind/op_function_common.cc |  7 ++++-
 paddle/fluid/pybind/op_function_common.h  |  2 ++
 .../fluid/tests/unittests/test_clip_op.py |  3 ++
 .../fluid/tests/unittests/test_full_op.py |  6 ++++
 6 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 8fa21ef45f..bd7194198e 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -1013,6 +1013,32 @@ paddle::experimental::Tensor& GetTensorFromPyObject(PyObject* obj) {
   return reinterpret_cast<TensorObject*>(obj)->tensor;
 }
 
+paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
+                                              const std::string& op_type,
+                                              ssize_t arg_pos) {
+  PyTypeObject* type = obj->ob_type;
+  auto type_name = std::string(type->tp_name);
+  VLOG(1) << "type_name: " << type_name;
+  if (type_name == "numpy.float64") {
+    double value = CastPyArg2Double(obj, op_type, arg_pos);
+    return paddle::experimental::Scalar(value);
+  } else if (type_name == "numpy.float32") {
+    float value = CastPyArg2Float(obj, op_type, arg_pos);
+    return paddle::experimental::Scalar(value);
+  } else if (type_name == "numpy.int64") {
+    int64_t value = CastPyArg2Long(obj, op_type, arg_pos);
+    return paddle::experimental::Scalar(value);
+  } else if (type_name == "numpy.int32") {
+    int value = CastPyArg2Int(obj, op_type, arg_pos);
+    return paddle::experimental::Scalar(value);
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "%s(): argument (position %d) must be "
+        "numpy.float32/float64, numpy.int32/int64, but got %s",
+        op_type, arg_pos + 1, type_name));  // NOLINT
+  }
+}
+
 paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
                                               const std::string& op_type,
                                               ssize_t arg_pos) {
@@ -1027,6 +1053,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
   // obj could be: int, float, bool, paddle.Tensor
   PyTypeObject* type = obj->ob_type;
   auto type_name = std::string(type->tp_name);
+  VLOG(1) << "type_name: " << type_name;
   if (type_name == "int") {
     int value = CastPyArg2Int(obj, op_type, arg_pos);
     return paddle::experimental::Scalar(value);
@@ -1042,7 +1069,8 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
     paddle::experimental::Tensor& value = GetTensorFromPyObject(
         op_type, "" /*arg_name*/, obj, arg_pos, false /*dispensable*/);
     return paddle::experimental::Scalar(value);
-
+  } else if (type_name.find("numpy") != std::string::npos) {
+    return CastNumpy2Scalar(obj, op_type, arg_pos);
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "%s(): argument (position %d) must be "
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 90c4d72792..1e9cf705ac 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -158,6 +158,10 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
                                               const std::string& op_type,
                                               ssize_t arg_pos);
 
+paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
+                                              const std::string& op_type,
+                                              ssize_t arg_pos);
+
 paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
                                                   const std::string& op_type,
                                                   ssize_t arg_pos);
diff --git a/paddle/fluid/pybind/op_function_common.cc b/paddle/fluid/pybind/op_function_common.cc
index 614689b148..50e0daf850 100644
--- a/paddle/fluid/pybind/op_function_common.cc
+++ b/paddle/fluid/pybind/op_function_common.cc
@@ -174,8 +174,13 @@ void CastPyArg2AttrLong(PyObject* obj,
 
 float CastPyArg2Float(PyObject* obj, const std::string& op_type,
                       ssize_t arg_pos) {
+  return static_cast<float>(CastPyArg2Double(obj, op_type, arg_pos));
+}
+
+double CastPyArg2Double(PyObject* obj, const std::string& op_type,
+                        ssize_t arg_pos) {
   if (PyObject_CheckFloatOrToFloat(&obj)) {
-    return (float)PyFloat_AsDouble(obj);  // NOLINT
+    return PyFloat_AsDouble(obj);  // NOLINT
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "%s(): argument (position %d) must be "
diff --git a/paddle/fluid/pybind/op_function_common.h b/paddle/fluid/pybind/op_function_common.h
index 33d0e242a0..debaf8fae1 100644
--- a/paddle/fluid/pybind/op_function_common.h
+++ b/paddle/fluid/pybind/op_function_common.h
@@ -50,6 +50,8 @@ int64_t CastPyArg2Long(PyObject* obj, const std::string& op_type,
                        ssize_t arg_pos);
 float CastPyArg2Float(PyObject* obj, const std::string& op_type,
                       ssize_t arg_pos);
+double CastPyArg2Double(PyObject* obj, const std::string& op_type,
+                        ssize_t arg_pos);
 std::string CastPyArg2String(PyObject* obj, const std::string& op_type,
                              ssize_t arg_pos);
 std::vector<bool> CastPyArg2Booleans(PyObject* obj, const std::string& op_type,
diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py
index f4423ccd02..37b1cfd02f 100644
--- a/python/paddle/fluid/tests/unittests/test_clip_op.py
+++ b/python/paddle/fluid/tests/unittests/test_clip_op.py
@@ -221,6 +221,8 @@ class TestClipAPI(unittest.TestCase):
             paddle.cast(images * 10, 'int32'), min=2, max=8)
         out_5 = self._executed_api(
             paddle.cast(images * 10, 'int64'), min=2, max=8)
+        # test with numpy.generic
+        out_6 = self._executed_api(images, min=np.abs(0.2), max=np.abs(0.8))
 
         self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8)))
         self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9)))
@@ -229,6 +231,7 @@ class TestClipAPI(unittest.TestCase):
             np.allclose(out_4.numpy(), (data * 10).astype(np.int32).clip(2, 8)))
         self.assertTrue(
             np.allclose(out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8)))
+        self.assertTrue(np.allclose(out_6.numpy(), data.clip(0.2, 0.8)))
 
     def test_eager(self):
         with _test_eager_guard():
diff --git a/python/paddle/fluid/tests/unittests/test_full_op.py b/python/paddle/fluid/tests/unittests/test_full_op.py
index 2442f2b681..108469cf8a 100644
--- a/python/paddle/fluid/tests/unittests/test_full_op.py
+++ b/python/paddle/fluid/tests/unittests/test_full_op.py
@@ -108,6 +108,9 @@ class TestFullAPI(unittest.TestCase):
                 shape=[1], dtype=np.float32, value=1.1)
             out_7 = paddle.full(
                 shape=[1, 2], dtype=np.float32, fill_value=val)
+            # test for numpy.float64 as fill_value
+            out_8 = paddle.full_like(
+                out_7, dtype=np.float32, fill_value=np.abs(1.1))
 
             assert np.array_equal(
                 out_1, np.full(
@@ -130,6 +133,9 @@ class TestFullAPI(unittest.TestCase):
             assert np.array_equal(
                 out_7, np.full(
                     [1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(
+                out_8, np.full(
+                    [1, 2], 1.1, dtype="float32"))
 
 
 class TestFullOpError(unittest.TestCase):
-- 
GitLab
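
A minimal usage sketch of the behavior the new unit tests above exercise, assuming a Paddle build that includes this patch and runs in eager (dygraph) mode; the tensor shape, values, and variable names below are illustrative and not taken from the patch:

    import numpy as np
    import paddle

    data = np.random.random([3, 4]).astype("float32")
    x = paddle.to_tensor(data)

    # np.abs(0.2) yields an np.float64 (a numpy.generic), not a Python float;
    # the eager Scalar parsing previously rejected such values as attributes.
    out_clip = paddle.clip(x, min=np.abs(0.2), max=np.abs(0.8))
    out_full = paddle.full_like(x, fill_value=np.abs(1.1), dtype="float32")

    print(np.allclose(out_clip.numpy(), data.clip(0.2, 0.8)))               # True
    print(np.allclose(out_full.numpy(), np.full([3, 4], 1.1, dtype="float32")))  # True

On the C++ side, CastPyArg2Scalar now routes any type whose name contains "numpy" to the new CastNumpy2Scalar helper, which dispatches on numpy.float64/float32/int64/int32 and raises InvalidArgument for other numpy types.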