Unverified commit e4cb897e, authored by Aurelius84, committed by GitHub

[Eager]Fix full_like/clip with np.generic type as attribute (#41808) (#41974)

* [Eager]Fix full_like/clip with np.generic type as attribute

* support numpy generic

* remove useless code
Parent f5d356b8
@@ -1013,6 +1013,32 @@ paddle::experimental::Tensor& GetTensorFromPyObject(PyObject* obj) {
  return reinterpret_cast<TensorObject*>(obj)->tensor;
}

paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
                                              const std::string& op_type,
                                              ssize_t arg_pos) {
  PyTypeObject* type = obj->ob_type;
  auto type_name = std::string(type->tp_name);
  VLOG(1) << "type_name: " << type_name;
  if (type_name == "numpy.float64") {
    double value = CastPyArg2Double(obj, op_type, arg_pos);
    return paddle::experimental::Scalar(value);
  } else if (type_name == "numpy.float32") {
    float value = CastPyArg2Float(obj, op_type, arg_pos);
    return paddle::experimental::Scalar(value);
  } else if (type_name == "numpy.int64") {
    int64_t value = CastPyArg2Long(obj, op_type, arg_pos);
    return paddle::experimental::Scalar(value);
  } else if (type_name == "numpy.int32") {
    int value = CastPyArg2Int(obj, op_type, arg_pos);
    return paddle::experimental::Scalar(value);
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "%s(): argument (position %d) must be "
        "numpy.float32/float64, numpy.int32/int64, but got %s",
        op_type, arg_pos + 1, type_name));  // NOLINT
  }
}
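For context, a minimal Python-side sketch of what this cast enables (a hypothetical snippet, not part of the commit, assuming an eager-mode PaddlePaddle build that includes this change):

import numpy as np
import paddle

x = paddle.rand([2, 3])

# np.abs(0.2) yields a numpy.float64 scalar, not a Python float.
# Before this fix, passing it as a Scalar attribute in eager mode
# raised InvalidArgument; with CastNumpy2Scalar it is accepted and
# behaves like the equivalent plain float.
y = paddle.clip(x, min=np.abs(0.2), max=np.abs(0.8))
z = paddle.full_like(x, fill_value=np.float64(1.1))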
paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
                                              const std::string& op_type,
                                              ssize_t arg_pos) {
@@ -1027,6 +1053,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
  // obj could be: int, float, bool, paddle.Tensor
  PyTypeObject* type = obj->ob_type;
  auto type_name = std::string(type->tp_name);
  VLOG(1) << "type_name: " << type_name;
  if (type_name == "int") {
    int value = CastPyArg2Int(obj, op_type, arg_pos);
    return paddle::experimental::Scalar(value);
@@ -1042,7 +1069,8 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
    paddle::experimental::Tensor& value = GetTensorFromPyObject(
        op_type, "" /*arg_name*/, obj, arg_pos, false /*dispensable*/);
    return paddle::experimental::Scalar(value);
  } else if (type_name.find("numpy") != std::string::npos) {
    return CastNumpy2Scalar(obj, op_type, arg_pos);
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "%s(): argument (position %d) must be "
......
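A side note on the dispatch above: the substring match on tp_name works because NumPy scalar types carry a fully qualified type name. A quick illustration from the Python side (a sketch, not part of the commit):

import numpy as np

# The C-side ob_type->tp_name corresponds to "module.name" below,
# e.g. "numpy.float64", so checking for the "numpy" substring is
# enough to route all four supported scalar types to CastNumpy2Scalar.
for v in (np.float32(1.0), np.float64(1.0), np.int32(1), np.int64(1)):
    print(type(v).__module__ + "." + type(v).__name__)
# numpy.float32
# numpy.float64
# numpy.int32
# numpy.int64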
@@ -158,6 +158,10 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
                                              const std::string& op_type,
                                              ssize_t arg_pos);

paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
                                              const std::string& op_type,
                                              ssize_t arg_pos);

paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
                                                  const std::string& op_type,
                                                  ssize_t arg_pos);
......
@@ -174,8 +174,13 @@ void CastPyArg2AttrLong(PyObject* obj,
float CastPyArg2Float(PyObject* obj, const std::string& op_type,
                      ssize_t arg_pos) {
  return static_cast<float>(CastPyArg2Double(obj, op_type, arg_pos));
}

double CastPyArg2Double(PyObject* obj, const std::string& op_type,
                        ssize_t arg_pos) {
  if (PyObject_CheckFloatOrToFloat(&obj)) {
    return PyFloat_AsDouble(obj);  // NOLINT
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "%s(): argument (position %d) must be "
......
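Splitting out a double-returning cast is not just refactoring: a numpy.float64 attribute would lose precision if narrowed through float first. A small sketch (not part of the commit) of the difference:

import numpy as np

v = np.float64(0.1)
print(v == np.float32(v))  # False: float32 cannot hold 0.1 exactly
print(float(v))            # 0.1: the double value survives intact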
@@ -50,6 +50,8 @@ int64_t CastPyArg2Long(PyObject* obj, const std::string& op_type,
                       ssize_t arg_pos);
float CastPyArg2Float(PyObject* obj, const std::string& op_type,
                      ssize_t arg_pos);
double CastPyArg2Double(PyObject* obj, const std::string& op_type,
                        ssize_t arg_pos);
std::string CastPyArg2String(PyObject* obj, const std::string& op_type,
                             ssize_t arg_pos);
std::vector<bool> CastPyArg2Booleans(PyObject* obj, const std::string& op_type,
                                     ssize_t arg_pos);
......
@@ -221,6 +221,8 @@ class TestClipAPI(unittest.TestCase):
            paddle.cast(images * 10, 'int32'), min=2, max=8)
        out_5 = self._executed_api(
            paddle.cast(images * 10, 'int64'), min=2, max=8)
        # test with numpy.generic
        out_6 = self._executed_api(images, min=np.abs(0.2), max=np.abs(0.8))

        self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8)))
        self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9)))
@@ -229,6 +231,7 @@ class TestClipAPI(unittest.TestCase):
            np.allclose(out_4.numpy(), (data * 10).astype(np.int32).clip(2, 8)))
        self.assertTrue(
            np.allclose(out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8)))
        self.assertTrue(np.allclose(out_6.numpy(), data.clip(0.2, 0.8)))

    def test_eager(self):
        with _test_eager_guard():
......
@@ -108,6 +108,9 @@ class TestFullAPI(unittest.TestCase):
                shape=[1], dtype=np.float32, value=1.1)
            out_7 = paddle.full(
                shape=[1, 2], dtype=np.float32, fill_value=val)
            # test for numpy.float64 as fill_value
            out_8 = paddle.full_like(
                out_7, dtype=np.float32, fill_value=np.abs(1.1))

            assert np.array_equal(
                out_1, np.full(
@@ -130,6 +133,9 @@ class TestFullAPI(unittest.TestCase):
            assert np.array_equal(
                out_7, np.full(
                    [1, 2], 1.1, dtype="float32"))
            assert np.array_equal(
                out_8, np.full(
                    [1, 2], 1.1, dtype="float32"))

class TestFullOpError(unittest.TestCase):
......