Unverified commit b543998f, authored by: W Weilong Wu, committed by: GitHub

[Cherry-pick, Eager] Fix CastPyArg2scalar for max value of int64 (#42098) (#42129)

* [Eager] Fix CastPyArg2scalar for max value of int64 (#42098)

* [Eager] Fix CastPyArg2Scalar in Long case

* Add more test cases for paddle.clip

* Use PyLong_AsLongLong

* Fix merge conflicts
Parent 6ab441bb
......@@ -1054,23 +1054,24 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
PyTypeObject* type = obj->ob_type;
auto type_name = std::string(type->tp_name);
VLOG(1) << "type_name: " << type_name;
if (type_name == "int") {
int value = CastPyArg2Int(obj, op_type, arg_pos);
if (PyBool_Check(obj)) {
bool value = CastPyArg2Boolean(obj, op_type, arg_pos);
return paddle::experimental::Scalar(value);
} else if (type_name == "float") {
float value = CastPyArg2Float(obj, op_type, arg_pos);
} else if (PyLong_Check(obj)) {
int64_t value = CastPyArg2Long(obj, op_type, arg_pos);
return paddle::experimental::Scalar(value);
} else if (type_name == "bool") {
bool value = CastPyArg2Boolean(obj, op_type, arg_pos);
} else if (PyFloat_Check(obj)) {
float value = CastPyArg2Float(obj, op_type, arg_pos);
return paddle::experimental::Scalar(value);
} else if (type_name == "Tensor") {
} else if (IsEagerTensor(obj)) {
paddle::experimental::Tensor& value = GetTensorFromPyObject(
op_type, "" /*arg_name*/, obj, arg_pos, false /*dispensable*/);
return paddle::experimental::Scalar(value);
} else if (type_name.find("numpy") != std::string::npos) {
return CastNumpy2Scalar(obj, op_type, arg_pos);
} else if (PyObject_CheckLongOrToLong(&obj)) {
int value = CastPyArg2Int(obj, op_type, arg_pos);
return paddle::experimental::Scalar(value);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument (position %d) must be "
......
......@@ -153,7 +153,7 @@ void CastPyArg2AttrInt(PyObject* obj,
int64_t CastPyArg2Long(PyObject* obj, const std::string& op_type,
ssize_t arg_pos) {
if (PyObject_CheckLongOrToLong(&obj)) {
return (int64_t)PyLong_AsLong(obj); // NOLINT
return (int64_t)PyLong_AsLongLong(obj); // NOLINT
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument (position %d) must be "
......
......@@ -200,7 +200,7 @@ class TestClipAPI(unittest.TestCase):
np.allclose(res11, (data * 10).astype(np.int64).clip(2, 8)))
paddle.disable_static()
def test_clip_dygraph(self):
def func_clip_dygraph(self):
paddle.disable_static()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
......@@ -233,9 +233,29 @@ class TestClipAPI(unittest.TestCase):
np.allclose(out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8)))
self.assertTrue(np.allclose(out_6.numpy(), data.clip(0.2, 0.8)))
def test_eager(self):
def test_clip_dygraph(self):
with _test_eager_guard():
self.func_clip_dygraph()
self.func_clip_dygraph()
def test_clip_dygraph_default_max(self):
paddle.disable_static()
with _test_eager_guard():
self.test_clip_dygraph()
x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
egr_out1 = paddle.clip(x_int32, min=1)
egr_out2 = paddle.clip(x_int64, min=1)
egr_out3 = paddle.clip(x_f32, min=1)
x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
out1 = paddle.clip(x_int32, min=1)
out2 = paddle.clip(x_int64, min=1)
out3 = paddle.clip(x_f32, min=1)
self.assertTrue(np.allclose(out1.numpy(), egr_out1.numpy()))
self.assertTrue(np.allclose(out2.numpy(), egr_out2.numpy()))
self.assertTrue(np.allclose(out3.numpy(), egr_out3.numpy()))
def test_errors(self):
paddle.enable_static()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Please finish editing this message first!
To comment, please register