diff --git a/paddle/fluid/pybind/eager_math_op_patch.cc b/paddle/fluid/pybind/eager_math_op_patch.cc
index 7e4f7b0cfac7e896b8abf4a864b4c92649bccdc3..887eb73c0b1b0c6a6ed2c950aefa465894fdb6ad 100644
--- a/paddle/fluid/pybind/eager_math_op_patch.cc
+++ b/paddle/fluid/pybind/eager_math_op_patch.cc
@@ -224,11 +224,10 @@ static PyObject* tensor__add__method(TensorObject* self,
         other_tensor = cast_ad_func(other_tensor, promote_dtype);
       }
     } else {
-      LOG(WARNING)
-          << "The dtype of left and right Tensor are not the same, left "
-             "dtype is "
-          << lhs_dtype << ", but right dtype is " << rhs_dtype
-          << ", the right dtype will convert to " << lhs_dtype;
+      VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+                 "dtype is "
+              << lhs_dtype << ", but right dtype is " << rhs_dtype
+              << ", the right dtype will convert to " << lhs_dtype;
       eager_gil_scoped_release guard;
       other_tensor = cast_ad_func(other_tensor, lhs_dtype);
     }
@@ -320,11 +319,10 @@ static PyObject* tensor__sub__method(TensorObject* self,
         other_tensor = cast_ad_func(other_tensor, promote_dtype);
       }
     } else {
-      LOG(WARNING)
-          << "The dtype of left and right Tensor are not the same, left "
-             "dtype is "
-          << lhs_dtype << ", but right dtype is " << rhs_dtype
-          << ", the right dtype will convert to " << lhs_dtype;
+      VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+                 "dtype is "
+              << lhs_dtype << ", but right dtype is " << rhs_dtype
+              << ", the right dtype will convert to " << lhs_dtype;
       eager_gil_scoped_release guard;
       other_tensor = cast_ad_func(other_tensor, lhs_dtype);
     }
@@ -414,11 +412,10 @@ static PyObject* tensor__rsub__method(TensorObject* self,
         other_tensor = cast_ad_func(other_tensor, promote_dtype);
       }
     } else {
-      LOG(WARNING)
-          << "The dtype of left and right Tensor are not the same, left "
-             "dtype is "
-          << lhs_dtype << ", but right dtype is " << rhs_dtype
-          << ", the right dtype will convert to " << lhs_dtype;
+      VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+                 "dtype is "
+              << lhs_dtype << ", but right dtype is " << rhs_dtype
+              << ", the right dtype will convert to " << lhs_dtype;
       eager_gil_scoped_release guard;
       other_tensor = cast_ad_func(other_tensor, lhs_dtype);
     }
@@ -515,11 +512,10 @@ static PyObject* tensor__mul__method(TensorObject* self,
         other_tensor = cast_ad_func(other_tensor, promote_dtype);
       }
     } else {
-      LOG(WARNING)
-          << "The dtype of left and right Tensor are not the same, left "
-             "dtype is "
-          << lhs_dtype << ", but right dtype is " << rhs_dtype
-          << ", the right dtype will convert to " << lhs_dtype;
+      VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+                 "dtype is "
+              << lhs_dtype << ", but right dtype is " << rhs_dtype
+              << ", the right dtype will convert to " << lhs_dtype;
       eager_gil_scoped_release guard;
       other_tensor = cast_ad_func(other_tensor, lhs_dtype);
     }
@@ -617,11 +613,10 @@ static PyObject* tensor__div__method(TensorObject* self,
         other_tensor = cast_ad_func(other_tensor, promote_dtype);
       }
     } else {
-      LOG(WARNING)
-          << "The dtype of left and right Tensor are not the same, left "
-             "dtype is "
-          << lhs_dtype << ", but right dtype is " << rhs_dtype
-          << ", the right dtype will convert to " << lhs_dtype;
+      VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+                 "dtype is "
+              << lhs_dtype << ", but right dtype is " << rhs_dtype
+              << ", the right dtype will convert to " << lhs_dtype;
       eager_gil_scoped_release guard;
       other_tensor = cast_ad_func(other_tensor, lhs_dtype);
     }
@@ -733,11 +728,10 @@ static PyObject* tensor__rdiv__method(TensorObject* self,
         other_tensor = cast_ad_func(other_tensor, promote_dtype);
       }
     } else {
-      LOG(WARNING)
-          << "The dtype of left and right Tensor are not the same, left "
-             "dtype is "
-          << lhs_dtype << ", but right dtype is " << rhs_dtype
-          << ", the right dtype will convert to " << lhs_dtype;
+      VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+                 "dtype is "
+              << lhs_dtype << ", but right dtype is " << rhs_dtype
+              << ", the right dtype will convert to " << lhs_dtype;
       eager_gil_scoped_release guard;
       other_tensor = cast_ad_func(other_tensor, lhs_dtype);
     }
@@ -829,10 +823,10 @@ static PyObject* tensor__gt__method(TensorObject* self,
   phi::DataType lhs_dtype = self_tensor.dtype();
   phi::DataType rhs_dtype = other_tensor.dtype();
   if (lhs_dtype != rhs_dtype) {
-    LOG(WARNING) << "The dtype of left and right Tensor are not the same, left "
-                    "dtype is "
-                 << lhs_dtype << ", but right dtype is " << rhs_dtype
-                 << ", the right dtype will convert to " << lhs_dtype;
+    VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+               "dtype is "
+            << lhs_dtype << ", but right dtype is " << rhs_dtype
+            << ", the right dtype will convert to " << lhs_dtype;
     eager_gil_scoped_release guard;
     other_tensor = cast_ad_func(other_tensor, lhs_dtype);
   }
@@ -914,10 +908,10 @@ static PyObject* tensor__ge__method(TensorObject* self,
   phi::DataType lhs_dtype = self_tensor.dtype();
   phi::DataType rhs_dtype = other_tensor.dtype();
   if (lhs_dtype != rhs_dtype) {
-    LOG(WARNING) << "The dtype of left and right Tensor are not the same, left "
-                    "dtype is "
-                 << lhs_dtype << ", but right dtype is " << rhs_dtype
-                 << ", the right dtype will convert to " << lhs_dtype;
+    VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+               "dtype is "
+            << lhs_dtype << ", but right dtype is " << rhs_dtype
+            << ", the right dtype will convert to " << lhs_dtype;
     eager_gil_scoped_release guard;
     other_tensor = cast_ad_func(other_tensor, lhs_dtype);
   }
@@ -933,6 +927,192 @@ static PyObject* tensor__ge__method(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+static PyObject* tensor__mod__method(TensorObject* self,
+                                     PyObject* args,
+                                     PyObject* kwargs) {
+  paddle::platform::RecordEvent pythonc_record_event(
+      "__mod__ pybind_patch_func",
+      paddle::platform::TracerEventType::UserDefined,
+      1);
+  EAGER_TRY
+
+  VLOG(6) << "Running Eager tensor__mod__method";
+
+  // Set Device ID
+  auto place = egr::Controller::Instance().GetExpectedPlace();
+  SetDevice(place);
+
+  paddle::experimental::Tensor ret;
+  paddle::experimental::Tensor self_tensor = self->tensor;
+
+  PyObject* other_obj = PyTuple_GET_ITEM(args, 0);
+
+  // 1. scalar exists cases
+  // there is no scalar_mod function for __mod__ now
+  float other_float = 0.0;
+  bool has_other_float = false;
+  if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) ||
+      IsNumpyType(other_obj)) {
+    if (PyFloat_Check(other_obj)) {
+      other_float = CastPyArg2AttrFloat(other_obj, 0);
+      has_other_float = true;
+      if (_supported_int_dtype_.find(self_tensor.dtype()) !=
+          _supported_int_dtype_.end()) {
+        eager_gil_scoped_release guard;
+        self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32);
+      }
+    } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) {
+      other_float = static_cast<float>(CastPyArg2AttrInt(other_obj, 0));
+      has_other_float = true;
+    }
+  }
+
+  // 2. create or get tensor for other_obj
+  paddle::experimental::Tensor other_tensor;
+  if (has_other_float) {
+    eager_gil_scoped_release guard;
+    other_tensor = full_ad_func(self_tensor.shape(),
+                                phi::Scalar(other_float),
+                                self_tensor.dtype(),
+                                place);
+  } else if (!PyCheckTensor(other_obj)) {
+    paddle::experimental::Scalar value =
+        CastPyArg2Scalar(other_obj, "__mod__", 0);
+    if (PyComplex_Check(other_obj)) {
+      eager_gil_scoped_release guard;
+      other_tensor = full_ad_func({1}, value, DataType::COMPLEX64, place);
+    } else {
+      eager_gil_scoped_release guard;
+      other_tensor =
+          full_ad_func(self_tensor.shape(), value, self_tensor.dtype(), place);
+    }
+  } else {
+    other_tensor = CastPyArg2Tensor(other_obj, 0);
+  }
+
+  // 3. promote types or unify right var type to left var
+  phi::DataType lhs_dtype = self_tensor.dtype();
+  phi::DataType rhs_dtype = other_tensor.dtype();
+  if (lhs_dtype != rhs_dtype) {
+    VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+               "dtype is "
+            << lhs_dtype << ", but right dtype is " << rhs_dtype
+            << ", the right dtype will convert to " << lhs_dtype;
+    eager_gil_scoped_release guard;
+    other_tensor = cast_ad_func(other_tensor, lhs_dtype);
+  }
+
+  // 4. calculation
+  VLOG(6) << "Calling remainder_ad_func in tensor__mod__method";
+  {
+    eager_gil_scoped_release guard;
+    ret = remainder_ad_func(self_tensor, other_tensor);
+  }
+  return ToPyObject(ret);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* tensor__matmul__method(TensorObject* self,
+                                        PyObject* args,
+                                        PyObject* kwargs) {
+  paddle::platform::RecordEvent pythonc_record_event(
+      "__matmul__ pybind_patch_func",
+      paddle::platform::TracerEventType::UserDefined,
+      1);
+  EAGER_TRY
+
+  VLOG(6) << "Running Eager tensor__matmul__method";
+
+  // Set Device ID
+  auto place = egr::Controller::Instance().GetExpectedPlace();
+  SetDevice(place);
+
+  paddle::experimental::Tensor ret;
+  paddle::experimental::Tensor self_tensor = self->tensor;
+
+  PyObject* other_obj = PyTuple_GET_ITEM(args, 0);
+
+  // 1. scalar exists cases
+  // there is no scalar_matmul function for __matmul__ now
+  float other_float = 0.0;
+  bool has_other_float = false;
+  if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) ||
+      IsNumpyType(other_obj)) {
+    if (PyFloat_Check(other_obj)) {
+      other_float = CastPyArg2AttrFloat(other_obj, 0);
+      has_other_float = true;
+      if (_supported_int_dtype_.find(self_tensor.dtype()) !=
+          _supported_int_dtype_.end()) {
+        eager_gil_scoped_release guard;
+        self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32);
+      }
+    } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) {
+      other_float = static_cast<float>(CastPyArg2AttrInt(other_obj, 0));
+      has_other_float = true;
+    }
+  }
+
+  // 2. create or get tensor for other_obj
+  paddle::experimental::Tensor other_tensor;
+  if (has_other_float) {
+    eager_gil_scoped_release guard;
+    other_tensor =
+        full_ad_func({1}, phi::Scalar(other_float), self_tensor.dtype(), place);
+  } else if (!PyCheckTensor(other_obj)) {
+    paddle::experimental::Scalar value =
+        CastPyArg2Scalar(other_obj, "__matmul__", 0);
+    if (PyComplex_Check(other_obj)) {
+      eager_gil_scoped_release guard;
+      other_tensor = full_ad_func({1}, value, DataType::COMPLEX64, place);
+    } else {
+      eager_gil_scoped_release guard;
+      other_tensor = full_ad_func({1}, value, self_tensor.dtype(), place);
+    }
+  } else {
+    other_tensor = CastPyArg2Tensor(other_obj, 0);
+  }
+
+  // 3. promote types or unify right var type to left var
+  phi::DataType lhs_dtype = self_tensor.dtype();
+  phi::DataType rhs_dtype = other_tensor.dtype();
+  if (lhs_dtype != rhs_dtype) {
+    // note: only op_type in _supported_promote_complex_types_ should promote
+    // dtype
+    if (_complex_dtypes.find(lhs_dtype) != _complex_dtypes.end() ||
+        _complex_dtypes.find(rhs_dtype) != _complex_dtypes.end()) {
+      phi::DataType promote_dtype =
+          framework::TransToPhiDataType(framework::PromoteTypesIfComplexExists(
+              framework::TransToProtoVarType(lhs_dtype),
+              framework::TransToProtoVarType(rhs_dtype)));
+      if (lhs_dtype != promote_dtype) {
+        // cast
+        eager_gil_scoped_release guard;
+        self_tensor = cast_ad_func(self_tensor, promote_dtype);
+      }
+      if (rhs_dtype != promote_dtype) {
+        eager_gil_scoped_release guard;
+        other_tensor = cast_ad_func(other_tensor, promote_dtype);
+      }
+    } else {
+      VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+                 "dtype is "
+              << lhs_dtype << ", but right dtype is " << rhs_dtype
+              << ", the right dtype will convert to " << lhs_dtype;
+      eager_gil_scoped_release guard;
+      other_tensor = cast_ad_func(other_tensor, lhs_dtype);
+    }
+  }
+
+  // 4. calculation
+  VLOG(6) << "Calling matmul_ad_func in tensor__matmul__method";
+  {
+    eager_gil_scoped_release guard;
+    ret = matmul_ad_func(self_tensor, other_tensor, false, false);
+  }
+  return ToPyObject(ret);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 static PyObject* tensor__lt__method(TensorObject* self,
                                     PyObject* args,
                                     PyObject* kwargs) {
@@ -999,10 +1179,10 @@ static PyObject* tensor__lt__method(TensorObject* self,
   phi::DataType lhs_dtype = self_tensor.dtype();
   phi::DataType rhs_dtype = other_tensor.dtype();
   if (lhs_dtype != rhs_dtype) {
-    LOG(WARNING) << "The dtype of left and right Tensor are not the same, left "
-                    "dtype is "
-                 << lhs_dtype << ", but right dtype is " << rhs_dtype
-                 << ", the right dtype will convert to " << lhs_dtype;
+    VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+               "dtype is "
+            << lhs_dtype << ", but right dtype is " << rhs_dtype
+            << ", the right dtype will convert to " << lhs_dtype;
     eager_gil_scoped_release guard;
     other_tensor = cast_ad_func(other_tensor, lhs_dtype);
   }
@@ -1084,10 +1264,10 @@ static PyObject* tensor__le__method(TensorObject* self,
   phi::DataType lhs_dtype = self_tensor.dtype();
   phi::DataType rhs_dtype = other_tensor.dtype();
   if (lhs_dtype != rhs_dtype) {
-    LOG(WARNING) << "The dtype of left and right Tensor are not the same, left "
-                    "dtype is "
-                 << lhs_dtype << ", but right dtype is " << rhs_dtype
-                 << ", the right dtype will convert to " << lhs_dtype;
+    VLOG(6) << "The dtype of left and right Tensor are not the same, left "
+               "dtype is "
+            << lhs_dtype << ", but right dtype is " << rhs_dtype
+            << ", the right dtype will convert to " << lhs_dtype;
     eager_gil_scoped_release guard;
     other_tensor = cast_ad_func(other_tensor, lhs_dtype);
   }
@@ -1144,6 +1324,14 @@ PyMethodDef math_op_patch_methods[] = {
      (PyCFunction)(void (*)(void))tensor__rdiv__method,
      METH_VARARGS | METH_KEYWORDS,
      NULL},
+    {"__mod__",
+     (PyCFunction)(void (*)(void))tensor__mod__method,
+     METH_VARARGS | METH_KEYWORDS,
+     NULL},
+    {"__matmul__",
+     (PyCFunction)(void (*)(void))tensor__matmul__method,
+     METH_VARARGS | METH_KEYWORDS,
+     NULL},
     {"__gt__",
      (PyCFunction)(void (*)(void))tensor__gt__method,
      METH_VARARGS | METH_KEYWORDS,
diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index eca775df5307b667f2643f5bfea696a7a01e17c5..b875013415867f9adf637c0c34b9a93fef6983b6 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -393,10 +393,6 @@ def monkey_patch_math_varbase():
                                       None)),
         ('__floordiv__', _binary_creator_('__floordiv__', 'floor_divide',
                                           False, None, True)),
-        ('__mod__', _binary_creator_('__mod__', 'remainder', False, None,
-                                     True)),
-        ('__matmul__',
-         _binary_creator_('__matmul__', "matmul", False, None, True)),
         # for logical compare
         ('__eq__', _binary_creator_('__eq__', 'equal', False, None, True)),
         ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None, True)),
@@ -414,6 +410,8 @@ def monkey_patch_math_varbase():
         '__truediv__',
         '__rdiv__',
         '__rtruediv__',
+        '__mod__',
+        '__matmul__',
         '__gt__',
        '__ge__',
         '__lt__',
diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py
index 58eec1be269df0c6ce3b03363b9b52506825823c..05e774451b8a8aaaba0c909c803c17b1779c5e68 100644
--- a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py
@@ -732,6 +732,22 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             self.func_test_complex_scalar()
         self.func_test_complex_scalar()
 
+    def func_test_matmul(self):
+        x_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
+        y_np = np.random.uniform(-1, 1, [3, 2]).astype(self.dtype)
+        except_out = x_np @ y_np
+
+        with fluid.dygraph.guard():
+            x = paddle.to_tensor(x_np)
+            y = paddle.to_tensor(y_np)
+            out = x @ y
+            np.testing.assert_allclose(out.numpy(), except_out, atol=1e-03)
+
+    def test_matmul(self):
+        with _test_eager_guard():
+            self.func_test_matmul()
+        self.func_test_matmul()
+
 
 if __name__ == '__main__':
     unittest.main()
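
For illustration only, a minimal dygraph sketch of what this patch reroutes: the Python-level __mod__/__matmul__ monkey patches are removed above, so the % and @ operators on eager Tensors now dispatch to the new C++ tensor__mod__method/tensor__matmul__method bindings. The shapes and values below are arbitrary assumptions, not taken from the patch or its test.

    import numpy as np
    import paddle

    # Assumed example inputs; any float32 tensors with compatible shapes work.
    x = paddle.to_tensor(np.random.uniform(-1, 1, [2, 3]).astype('float32'))
    y = paddle.to_tensor(np.random.uniform(-1, 1, [3, 2]).astype('float32'))

    out_matmul = x @ y                  # handled by tensor__matmul__method in eager mode
    out_mod = x % paddle.ones([2, 3])   # handled by tensor__mod__method in eager mode
    print(out_matmul.shape, out_mod.shape)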