Unverified Commit 0521af4e authored by Weilong Wu, committed by GitHub

[Eager, Performance optimization] support equal under cpp (#47315)

* [Eager, Performance optimization] support equal under c++ directly
Parent aab21d1a
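For context, a minimal usage sketch of what this patch changes (the tensor values below are illustrative, not from the patch): in eager mode, `==` on a Tensor now dispatches directly to the C++ `tensor__eq__method` added below, instead of going through the Python-level `_binary_creator_('__eq__', 'equal', ...)` wrapper that this commit removes.

import paddle

x = paddle.to_tensor([1, 2, 3])        # int64 tensor
y = paddle.to_tensor([1.0, 2.0, 4.0])  # float32 tensor

# Scalar comparand: handled entirely in C++, broadcast to x's shape.
print(x == 2)   # -> [False, True, False]

# Tensor comparand: the right operand is cast to the left dtype first,
# then equal_ad_func runs elementwise.
print(x == y)   # y cast to int64 -> [1, 2, 4] -> [True, True, False]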
@@ -1643,6 +1643,92 @@ static PyObject* tensor__ne__method(TensorObject* self,
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* tensor__eq__method(TensorObject* self,
                                    PyObject* args,
                                    PyObject* kwargs) {
paddle::platform::RecordEvent pythonc_record_event(
"__eq__ pybind_patch_func",
paddle::platform::TracerEventType::UserDefined,
1);
EAGER_TRY
VLOG(6) << "Running Eager tensor__eq__method";
// Set Device ID
auto place = egr::Controller::Instance().GetExpectedPlace();
SetDevice(place);
paddle::experimental::Tensor ret;
paddle::experimental::Tensor self_tensor = self->tensor;
PyObject* other_obj = PyTuple_GET_ITEM(args, 0);
  // 1. scalar cases
  // (there is no dedicated scalar function for __eq__ yet)
double other_double = 0.0;
bool has_other_double = false;
if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) ||
IsNumpyType(other_obj)) {
if (PyFloat_Check(other_obj)) {
other_double = CastPyArg2Double(other_obj, "__eq__", 0);
has_other_double = true;
if (_supported_int_dtype_.find(self_tensor.dtype()) !=
_supported_int_dtype_.end()) {
eager_gil_scoped_release guard;
self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32);
}
} else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) {
other_double = CastPyArg2Double(other_obj, "__eq__", 0);
has_other_double = true;
}
}
// 2. create or get tensor for other_obj
paddle::experimental::Tensor other_tensor;
if (has_other_double) {
eager_gil_scoped_release guard;
other_tensor = full_ad_func(self_tensor.shape(),
phi::Scalar(other_double),
self_tensor.dtype(),
self_tensor.place());
} else if (!PyCheckTensor(other_obj)) {
paddle::experimental::Scalar value =
CastPyArg2Scalar(other_obj, "__eq__", 0);
if (PyComplex_Check(other_obj)) {
eager_gil_scoped_release guard;
other_tensor =
full_ad_func({1}, value, DataType::COMPLEX64, self_tensor.place());
} else {
eager_gil_scoped_release guard;
other_tensor =
full_ad_func({1}, value, self_tensor.dtype(), self_tensor.place());
}
} else {
other_tensor = CastPyArg2Tensor(other_obj, 0);
}
// 3. promote types or unify right var type to left var
phi::DataType lhs_dtype = self_tensor.dtype();
phi::DataType rhs_dtype = other_tensor.dtype();
if (lhs_dtype != rhs_dtype) {
VLOG(6) << "The dtype of left and right Tensor are not the same, left "
"dtype is "
<< lhs_dtype << ", but right dtype is " << rhs_dtype
<< ", the right dtype will convert to " << lhs_dtype;
eager_gil_scoped_release guard;
other_tensor = cast_ad_func(other_tensor, lhs_dtype);
}
// 4. calculation
VLOG(6) << "Calling equal_ad_func in tensor__eq__method";
{
eager_gil_scoped_release guard;
ret = equal_ad_func(self_tensor, other_tensor, -1);
}
return ToPyObject(ret);
EAGER_CATCH_AND_THROW_RETURN_NULL
}
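To illustrate branch 1 above (a sketch; exact printed formatting depends on the Paddle build): when the comparand is a Python float and the left tensor has an integer dtype in `_supported_int_dtype_`, the left tensor is first cast to FLOAT32, so a fractional comparand compares exactly instead of being truncated.

import paddle

ints = paddle.to_tensor([1, 2, 3])   # int64

# Float comparand: `ints` is cast to float32 before full_ad_func builds the
# right-hand tensor, so 2.5 stays 2.5 and nothing matches.
print(ints == 2.5)                   # -> [False, False, False]

# Integer comparand: no cast; the scalar is filled at ints.dtype.
print(ints == 2)                     # -> [False, True, False]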
PyMethodDef math_op_patch_methods[] = {
    {"__add__",
     (PyCFunction)(void (*)(void))tensor__add__method,
@@ -1720,6 +1806,10 @@ PyMethodDef math_op_patch_methods[] = {
     (PyCFunction)(void (*)(void))tensor__le__method,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
{"__eq__",
(PyCFunction)(void (*)(void))tensor__eq__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__ne__", {"__ne__",
(PyCFunction)(void (*)(void))tensor__ne__method, (PyCFunction)(void (*)(void))tensor__ne__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
...
@@ -464,7 +464,6 @@ def monkey_patch_math_varbase():
        ('size', _size_),
        ('T', _T_),
        # for logical compare
        ('__eq__', _binary_creator_('__eq__', 'equal', False, None, True)),
        ('__array_ufunc__', None),
    ]
@@ -488,6 +487,7 @@ def monkey_patch_math_varbase():
        '__floordiv__',
        '__pow__',
        '__rpow__',
        '__eq__',
        '__ne__',
    ]
...
@@ -1017,6 +1017,9 @@ def monkey_patch_varbase():
        return _C_ops.sparse_to_sparse_coo(self, sparse_dim)

    def __hash__(self):
        return hash(id(self))
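A note on why `__hash__` is defined here: in Python, a class that defines `__eq__` in its class body implicitly gets `__hash__ = None` and becomes unhashable, and in any case the new `__eq__` returns a Tensor rather than a bool, so equality-based hashing no longer makes sense. Identity-based hashing keeps Tensors usable as dict keys and set members. A small sketch of the underlying Python rule (plain classes, not Paddle API):

class WithEq:
    def __eq__(self, other):
        return True

# Defining __eq__ alone sets __hash__ to None:
assert WithEq.__hash__ is None       # hash(WithEq()) would raise TypeError

class WithEqAndHash:
    def __eq__(self, other):
        return True
    def __hash__(self):
        return hash(id(self))        # identity hash, as the patch does

t = WithEqAndHash()
d = {t: "ok"}                        # hashable again; usable as a dict key
assert d[t] == "ok"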
    if framework._in_eager_mode_ and not hasattr(core, "eager"):
        return
@@ -1060,6 +1063,7 @@ def monkey_patch_varbase():
setattr(core.eager.Tensor, "_numel", _numel) setattr(core.eager.Tensor, "_numel", _numel)
setattr(core.eager.Tensor, "_uva", _uva) setattr(core.eager.Tensor, "_uva", _uva)
setattr(core.eager.Tensor, "_clear_data", _clear_data) setattr(core.eager.Tensor, "_clear_data", _clear_data)
setattr(core.eager.Tensor, "__hash__", __hash__)
    else:
        setattr(core.VarBase, "__name__", "Tensor")
        setattr(core.VarBase, "grad", grad)
...