Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
0521af4e
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
0521af4e
编写于
10月 26, 2022
作者:
W
Weilong Wu
提交者:
GitHub
10月 26, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
[Eager, Performance optimization] support equal under cpp (#47315)
* [Eager, Performance optimization] support equal under c++ directly
上级
aab21d1a
变更
3
显示空白变更内容
内联
并排
Showing
3 changed files
with
95 additions
and
1 deletion
+95
-1
paddle/fluid/pybind/eager_math_op_patch.cc
paddle/fluid/pybind/eager_math_op_patch.cc
+90
-0
python/paddle/fluid/dygraph/math_op_patch.py
python/paddle/fluid/dygraph/math_op_patch.py
+1
-1
python/paddle/fluid/dygraph/varbase_patch_methods.py
python/paddle/fluid/dygraph/varbase_patch_methods.py
+4
-0
未找到文件。
paddle/fluid/pybind/eager_math_op_patch.cc
浏览文件 @
0521af4e
...
@@ -1643,6 +1643,92 @@ static PyObject* tensor__ne__method(TensorObject* self,
...
@@ -1643,6 +1643,92 @@ static PyObject* tensor__ne__method(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
EAGER_CATCH_AND_THROW_RETURN_NULL
}
}
// Implements Tensor.__eq__ for the eager (dygraph) mode in C++ directly,
// bypassing the Python-level fallback for performance.
// Args (CPython METH_VARARGS | METH_KEYWORDS convention):
//   self   - the TensorObject wrapper holding the left-hand tensor.
//   args   - positional args; args[0] is the right-hand operand (scalar,
//            numpy scalar, complex, or Tensor).
//   kwargs - unused here.
// Returns: a new Python object wrapping the boolean result tensor of
//   elementwise equality, or NULL with a Python exception set on error.
static PyObject* tensor__eq__method(TensorObject* self,
                                    PyObject* args,
                                    PyObject* kwargs) {
  // RAII profiler event covering the whole call (ends at scope exit).
  paddle::platform::RecordEvent pythonc_record_event(
      "__eq__ pybind_patch_func",
      paddle::platform::TracerEventType::UserDefined,
      1);
  EAGER_TRY
  VLOG(6) << "Running Eager tensor__eq__method";
  // Set Device ID
  auto place = egr::Controller::Instance().GetExpectedPlace();
  SetDevice(place);
  paddle::experimental::Tensor ret;
  paddle::experimental::Tensor self_tensor = self->tensor;
  // Right-hand operand; borrowed reference, no bounds check is performed
  // (PyTuple_GET_ITEM is the unchecked macro form).
  PyObject* other_obj = PyTuple_GET_ITEM(args, 0);

  // 1. scalar exists cases
  // there is no scalar function for __eq__ now
  double other_double = 0.0;
  bool has_other_double = false;
  if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) ||
      IsNumpyType(other_obj)) {
    if (PyFloat_Check(other_obj)) {
      other_double = CastPyArg2Double(other_obj, "__eq__", 0);
      has_other_double = true;
      // Comparing a float scalar against an integer tensor: promote the
      // tensor to FLOAT32 so the comparison is done in floating point.
      // NOTE(review): _supported_int_dtype_ presumably lists the integer
      // dtypes eligible for this promotion — declared elsewhere in this file.
      if (_supported_int_dtype_.find(self_tensor.dtype()) !=
          _supported_int_dtype_.end()) {
        // Release the GIL around the kernel launch.
        eager_gil_scoped_release guard;
        self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32);
      }
    } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) {
      // Integer / numpy scalar: no dtype promotion of self is needed.
      other_double = CastPyArg2Double(other_obj, "__eq__", 0);
      has_other_double = true;
    }
  }

  // 2. create or get tensor for other_obj
  paddle::experimental::Tensor other_tensor;
  if (has_other_double) {
    // Materialize the scalar as a full tensor matching self's shape/dtype,
    // since no scalar overload of equal exists yet (see note above).
    eager_gil_scoped_release guard;
    other_tensor = full_ad_func(self_tensor.shape(),
                                phi::Scalar(other_double),
                                self_tensor.dtype(),
                                self_tensor.place());
  } else if (!PyCheckTensor(other_obj)) {
    // Not a float/int/numpy scalar and not a Tensor: coerce via Scalar
    // (covers bool, complex, 0-d cases handled by CastPyArg2Scalar).
    paddle::experimental::Scalar value =
        CastPyArg2Scalar(other_obj, "__eq__", 0);
    if (PyComplex_Check(other_obj)) {
      // Complex scalars get a 1-element COMPLEX64 tensor.
      eager_gil_scoped_release guard;
      other_tensor =
          full_ad_func({1}, value, DataType::COMPLEX64, self_tensor.place());
    } else {
      // Otherwise match self's dtype; broadcasting handles the shape.
      eager_gil_scoped_release guard;
      other_tensor =
          full_ad_func({1}, value, self_tensor.dtype(), self_tensor.place());
    }
  } else {
    // Already a Tensor: unwrap it directly.
    other_tensor = CastPyArg2Tensor(other_obj, 0);
  }

  // 3. promote types or unify right var type to left var
  phi::DataType lhs_dtype = self_tensor.dtype();
  phi::DataType rhs_dtype = other_tensor.dtype();
  if (lhs_dtype != rhs_dtype) {
    // Left-hand dtype wins: cast the right-hand tensor to match.
    VLOG(6) << "The dtype of left and right Tensor are not the same, left "
               "dtype is "
            << lhs_dtype << ", but right dtype is " << rhs_dtype
            << ", the right dtype will convert to " << lhs_dtype;
    eager_gil_scoped_release guard;
    other_tensor = cast_ad_func(other_tensor, lhs_dtype);
  }

  // 4. calculation
  VLOG(6) << "Calling equal_ad_func in tensor__eq__method";
  {
    eager_gil_scoped_release guard;
    // -1 is the axis argument — NOTE(review): presumably "auto/broadcast",
    // matching the other comparison methods in this file; confirm against
    // the equal op definition.
    ret = equal_ad_func(self_tensor, other_tensor, -1);
  }

  return ToPyObject(ret);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyMethodDef
math_op_patch_methods
[]
=
{
PyMethodDef
math_op_patch_methods
[]
=
{
{
"__add__"
,
{
"__add__"
,
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__add__method
,
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__add__method
,
...
@@ -1720,6 +1806,10 @@ PyMethodDef math_op_patch_methods[] = {
...
@@ -1720,6 +1806,10 @@ PyMethodDef math_op_patch_methods[] = {
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__le__method
,
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__le__method
,
METH_VARARGS
|
METH_KEYWORDS
,
METH_VARARGS
|
METH_KEYWORDS
,
NULL
},
NULL
},
{
"__eq__"
,
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__eq__method
,
METH_VARARGS
|
METH_KEYWORDS
,
NULL
},
{
"__ne__"
,
{
"__ne__"
,
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__ne__method
,
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__ne__method
,
METH_VARARGS
|
METH_KEYWORDS
,
METH_VARARGS
|
METH_KEYWORDS
,
...
...
python/paddle/fluid/dygraph/math_op_patch.py
浏览文件 @
0521af4e
...
@@ -464,7 +464,6 @@ def monkey_patch_math_varbase():
...
@@ -464,7 +464,6 @@ def monkey_patch_math_varbase():
(
'size'
,
_size_
),
(
'size'
,
_size_
),
(
'T'
,
_T_
),
(
'T'
,
_T_
),
# for logical compare
# for logical compare
(
'__eq__'
,
_binary_creator_
(
'__eq__'
,
'equal'
,
False
,
None
,
True
)),
(
'__array_ufunc__'
,
None
),
(
'__array_ufunc__'
,
None
),
]
]
...
@@ -488,6 +487,7 @@ def monkey_patch_math_varbase():
...
@@ -488,6 +487,7 @@ def monkey_patch_math_varbase():
'__floordiv__'
,
'__floordiv__'
,
'__pow__'
,
'__pow__'
,
'__rpow__'
,
'__rpow__'
,
'__eq__'
,
'__ne__'
,
'__ne__'
,
]
]
...
...
python/paddle/fluid/dygraph/varbase_patch_methods.py
浏览文件 @
0521af4e
...
@@ -1017,6 +1017,9 @@ def monkey_patch_varbase():
...
@@ -1017,6 +1017,9 @@ def monkey_patch_varbase():
return
_C_ops
.
sparse_to_sparse_coo
(
self
,
sparse_dim
)
return
_C_ops
.
sparse_to_sparse_coo
(
self
,
sparse_dim
)
def
__hash__
(
self
):
return
hash
(
id
(
self
))
if
framework
.
_in_eager_mode_
and
not
hasattr
(
core
,
"eager"
):
if
framework
.
_in_eager_mode_
and
not
hasattr
(
core
,
"eager"
):
return
return
...
@@ -1060,6 +1063,7 @@ def monkey_patch_varbase():
...
@@ -1060,6 +1063,7 @@ def monkey_patch_varbase():
setattr
(
core
.
eager
.
Tensor
,
"_numel"
,
_numel
)
setattr
(
core
.
eager
.
Tensor
,
"_numel"
,
_numel
)
setattr
(
core
.
eager
.
Tensor
,
"_uva"
,
_uva
)
setattr
(
core
.
eager
.
Tensor
,
"_uva"
,
_uva
)
setattr
(
core
.
eager
.
Tensor
,
"_clear_data"
,
_clear_data
)
setattr
(
core
.
eager
.
Tensor
,
"_clear_data"
,
_clear_data
)
setattr
(
core
.
eager
.
Tensor
,
"__hash__"
,
__hash__
)
else
:
else
:
setattr
(
core
.
VarBase
,
"__name__"
,
"Tensor"
)
setattr
(
core
.
VarBase
,
"__name__"
,
"Tensor"
)
setattr
(
core
.
VarBase
,
"grad"
,
grad
)
setattr
(
core
.
VarBase
,
"grad"
,
grad
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录