diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 8fa21ef45f82ff3a0f77d10a1e04960ecc85e8d9..bd7194198e565673b7f91a2af783eb17982bbd83 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -1013,6 +1013,32 @@ paddle::experimental::Tensor& GetTensorFromPyObject(PyObject* obj) {
   return reinterpret_cast<TensorObject*>(obj)->tensor;
 }
 
+paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
+                                              const std::string& op_type,
+                                              ssize_t arg_pos) {
+  PyTypeObject* type = obj->ob_type;
+  auto type_name = std::string(type->tp_name);
+  VLOG(1) << "type_name: " << type_name;
+  if (type_name == "numpy.float64") {
+    double value = CastPyArg2Double(obj, op_type, arg_pos);
+    return paddle::experimental::Scalar(value);
+  } else if (type_name == "numpy.float32") {
+    float value = CastPyArg2Float(obj, op_type, arg_pos);
+    return paddle::experimental::Scalar(value);
+  } else if (type_name == "numpy.int64") {
+    int64_t value = CastPyArg2Long(obj, op_type, arg_pos);
+    return paddle::experimental::Scalar(value);
+  } else if (type_name == "numpy.int32") {
+    int value = CastPyArg2Int(obj, op_type, arg_pos);
+    return paddle::experimental::Scalar(value);
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "%s(): argument (position %d) must be "
+        "numpy.float32/float64, numpy.int32/int64, but got %s",
+        op_type, arg_pos + 1, type_name));  // NOLINT
+  }
+}
+
 paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
                                               const std::string& op_type,
                                               ssize_t arg_pos) {
@@ -1027,6 +1053,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
   // obj could be: int, float, bool, paddle.Tensor
   PyTypeObject* type = obj->ob_type;
   auto type_name = std::string(type->tp_name);
+  VLOG(1) << "type_name: " << type_name;
   if (type_name == "int") {
     int value = CastPyArg2Int(obj, op_type, arg_pos);
     return paddle::experimental::Scalar(value);
@@ -1042,7 +1069,8 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
     paddle::experimental::Tensor& value = GetTensorFromPyObject(
         op_type, "" /*arg_name*/, obj, arg_pos, false /*dispensable*/);
     return paddle::experimental::Scalar(value);
-
+  } else if (type_name.find("numpy") != std::string::npos) {
+    return CastNumpy2Scalar(obj, op_type, arg_pos);
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "%s(): argument (position %d) must be "
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 90c4d727923d0acef72cb65526f84c6feddf515f..1e9cf705ace1d604ea067320d7581019d1ef5282 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -158,6 +158,10 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
                                               const std::string& op_type,
                                               ssize_t arg_pos);
 
+paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
+                                              const std::string& op_type,
+                                              ssize_t arg_pos);
+
 paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
                                                   const std::string& op_type,
                                                   ssize_t arg_pos);
diff --git a/paddle/fluid/pybind/op_function_common.cc b/paddle/fluid/pybind/op_function_common.cc
index 614689b148cdae7adaaeebd290a76d6dccca00db..50e0daf8508e3bfa4abda62cef2ceebc529d327d 100644
--- a/paddle/fluid/pybind/op_function_common.cc
+++ b/paddle/fluid/pybind/op_function_common.cc
@@ -174,8 +174,13 @@ void CastPyArg2AttrLong(PyObject* obj,
 
 float CastPyArg2Float(PyObject* obj, const std::string& op_type,
                       ssize_t arg_pos) {
+  return static_cast<float>(CastPyArg2Double(obj, op_type, arg_pos));
+}
+
+double CastPyArg2Double(PyObject* obj, const std::string& op_type,
+                        ssize_t arg_pos) {
   if (PyObject_CheckFloatOrToFloat(&obj)) {
-    return (float)PyFloat_AsDouble(obj);  // NOLINT
+    return PyFloat_AsDouble(obj);  // NOLINT
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "%s(): argument (position %d) must be "
diff --git a/paddle/fluid/pybind/op_function_common.h b/paddle/fluid/pybind/op_function_common.h
index 33d0e242a027d250904a21ca36a39b6a639178e1..debaf8fae17b7caa3137852d2f5156349bae704e 100644
--- a/paddle/fluid/pybind/op_function_common.h
+++ b/paddle/fluid/pybind/op_function_common.h
@@ -50,6 +50,8 @@ int64_t CastPyArg2Long(PyObject* obj, const std::string& op_type,
                        ssize_t arg_pos);
 float CastPyArg2Float(PyObject* obj, const std::string& op_type,
                       ssize_t arg_pos);
+double CastPyArg2Double(PyObject* obj, const std::string& op_type,
+                        ssize_t arg_pos);
 std::string CastPyArg2String(PyObject* obj, const std::string& op_type,
                              ssize_t arg_pos);
 std::vector<bool> CastPyArg2Booleans(PyObject* obj, const std::string& op_type,
diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py
index f4423ccd0294cc8cdf88a20b75c9f6f32dddd3db..37b1cfd02faf7255f02e0310cf5cbd239eaab851 100644
--- a/python/paddle/fluid/tests/unittests/test_clip_op.py
+++ b/python/paddle/fluid/tests/unittests/test_clip_op.py
@@ -221,6 +221,8 @@ class TestClipAPI(unittest.TestCase):
             paddle.cast(images * 10, 'int32'), min=2, max=8)
         out_5 = self._executed_api(
             paddle.cast(images * 10, 'int64'), min=2, max=8)
+        # test with numpy.generic
+        out_6 = self._executed_api(images, min=np.abs(0.2), max=np.abs(0.8))
 
         self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8)))
         self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9)))
@@ -229,6 +231,7 @@ class TestClipAPI(unittest.TestCase):
             np.allclose(out_4.numpy(), (data * 10).astype(np.int32).clip(2, 8)))
         self.assertTrue(
             np.allclose(out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8)))
+        self.assertTrue(np.allclose(out_6.numpy(), data.clip(0.2, 0.8)))
 
     def test_eager(self):
         with _test_eager_guard():
diff --git a/python/paddle/fluid/tests/unittests/test_full_op.py b/python/paddle/fluid/tests/unittests/test_full_op.py
index 2442f2b681554ecddbdb5cd7632829a00d248dbb..108469cf8a7323e3ae675c15e89bc1f6ba80c5f9 100644
--- a/python/paddle/fluid/tests/unittests/test_full_op.py
+++ b/python/paddle/fluid/tests/unittests/test_full_op.py
@@ -108,6 +108,9 @@ class TestFullAPI(unittest.TestCase):
                 shape=[1], dtype=np.float32, value=1.1)
             out_7 = paddle.full(
                 shape=[1, 2], dtype=np.float32, fill_value=val)
+            # test for numpy.float64 as fill_value
+            out_8 = paddle.full_like(
+                out_7, dtype=np.float32, fill_value=np.abs(1.1))
 
             assert np.array_equal(
                 out_1, np.full(
@@ -130,6 +133,9 @@ class TestFullAPI(unittest.TestCase):
             assert np.array_equal(
                 out_7, np.full(
                     [1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(
+                out_8, np.full(
+                    [1, 2], 1.1, dtype="float32"))
 
 
 class TestFullOpError(unittest.TestCase):
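
For reference, a minimal usage sketch of the behavior this patch enables: numpy scalar types (numpy.float32/float64/int32/int64) are routed through the new CastNumpy2Scalar helper, so they can be passed wherever the eager API expects a Scalar attribute. This mirrors the added unit tests and assumes a Paddle build that includes this change; np.abs(0.2) is used because it returns a numpy.float64 rather than a Python float.

    # Sketch mirroring the new tests; assumes a Paddle build with this patch.
    import numpy as np
    import paddle

    x = paddle.rand([3, 4])

    # clip: min/max passed as numpy.float64 instead of Python float
    clipped = paddle.clip(x, min=np.abs(0.2), max=np.abs(0.8))

    # full_like: fill_value passed as numpy.float64
    filled = paddle.full_like(x, dtype=np.float32, fill_value=np.abs(1.1))

    print(np.allclose(clipped.numpy(), x.numpy().clip(0.2, 0.8)))
    print(np.allclose(filled.numpy(), np.full([3, 4], 1.1, dtype="float32")))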