Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
1ecc39b4
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
1ecc39b4
编写于
9月 28, 2022
作者:
W
Weilong Wu
提交者:
GitHub
9月 28, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
merge develop (#46520)
上级
b828557f
变更
2
隐藏空白更改
内联
并排
Showing
2 changed file
with
180 addition
and
4 deletion
+180
-4
paddle/fluid/pybind/eager_math_op_patch.cc
paddle/fluid/pybind/eager_math_op_patch.cc
+178
-0
python/paddle/fluid/dygraph/math_op_patch.py
python/paddle/fluid/dygraph/math_op_patch.py
+2
-4
未找到文件。
paddle/fluid/pybind/eager_math_op_patch.cc
浏览文件 @
1ecc39b4
...
@@ -763,6 +763,176 @@ static PyObject* tensor__rdiv__method(TensorObject* self,
...
@@ -763,6 +763,176 @@ static PyObject* tensor__rdiv__method(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
EAGER_CATCH_AND_THROW_RETURN_NULL
}
}
// Python `__gt__` patch for eager Tensor: element-wise greater-than against a
// Python scalar, a NumPy scalar, a complex number, or another Tensor.
// Mirrors the other comparison patches: normalize the RHS to a Tensor of the
// LHS dtype, then dispatch to greater_than_ad_func. Returns a new PyObject
// holding the boolean result tensor.
static PyObject* tensor__gt__method(TensorObject* self,
                                    PyObject* args,
                                    PyObject* kwargs) {
  paddle::platform::RecordEvent pythonc_record_event(
      "__gt__ pybind_patch_func",
      paddle::platform::TracerEventType::UserDefined,
      1);
  EAGER_TRY
  VLOG(1) << "Running Eager tensor__gt__method";

  // Bind execution to the expected device before any kernel launch.
  auto place = egr::Controller::Instance().GetExpectedPlace();
  SetDevice(place);

  paddle::experimental::Tensor ret;
  paddle::experimental::Tensor self_tensor = self->tensor;
  PyObject* other_obj = PyTuple_GET_ITEM(args, 0);

  // Step 1: detect a plain numeric RHS (float / int / NumPy scalar).
  // There is no dedicated scalar kernel for __gt__, so the value is only
  // remembered here and materialized as a full tensor in step 2.
  float rhs_scalar = 0.0;
  bool rhs_is_numeric = false;
  if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) ||
      IsNumpyType(other_obj)) {
    if (PyFloat_Check(other_obj)) {
      rhs_scalar = CastPyArg2AttrFloat(other_obj, 0);
      rhs_is_numeric = true;
      // Comparing an integer tensor with a Python float: promote the LHS to
      // float32 so the comparison is done in floating point.
      if (_supported_int_dtype_.find(self_tensor.dtype()) !=
          _supported_int_dtype_.end()) {
        eager_gil_scoped_release guard;
        self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32);
      }
    } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) {
      rhs_scalar = static_cast<float>(CastPyArg2AttrInt(other_obj, 0));
      rhs_is_numeric = true;
    }
  }

  // Step 2: turn the RHS into a Tensor.
  paddle::experimental::Tensor other_tensor;
  if (rhs_is_numeric) {
    // Broadcast the scalar to the LHS shape/dtype.
    eager_gil_scoped_release guard;
    other_tensor = full_ad_func(self_tensor.shape(),
                                phi::Scalar(rhs_scalar),
                                self_tensor.dtype(),
                                place);
  } else if (!PyCheckTensor(other_obj)) {
    // Non-tensor, non-numeric RHS: go through the generic Scalar conversion.
    paddle::experimental::Scalar value =
        CastPyArg2Scalar(other_obj, "__gt__", 0);
    if (PyComplex_Check(other_obj)) {
      eager_gil_scoped_release guard;
      other_tensor = full_ad_func({1}, value, DataType::COMPLEX64, place);
    } else {
      eager_gil_scoped_release guard;
      other_tensor =
          full_ad_func(self_tensor.shape(), value, self_tensor.dtype(), place);
    }
  } else {
    // Already a Tensor — use it as-is.
    other_tensor = CastPyArg2Tensor(other_obj, 0);
  }

  // Step 3: unify dtypes — the RHS is converted to the LHS dtype.
  phi::DataType lhs_dtype = self_tensor.dtype();
  phi::DataType rhs_dtype = other_tensor.dtype();
  if (lhs_dtype != rhs_dtype) {
    LOG(WARNING) << "The dtype of left and right Tensor are not the same, left "
                    "dtype is "
                 << lhs_dtype << ", but right dtype is " << rhs_dtype
                 << ", the right dtype will convert to " << lhs_dtype;
    eager_gil_scoped_release guard;
    other_tensor = cast_ad_func(other_tensor, lhs_dtype);
  }

  // Step 4: run the comparison kernel (axis = -1).
  VLOG(6) << "Calling greater_than_ad_func in tensor__gt__method";
  {
    eager_gil_scoped_release guard;
    ret = greater_than_ad_func(self_tensor, other_tensor, -1);
  }

  return ToPyObject(ret);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Python `__ge__` patch for eager Tensor: element-wise greater-or-equal
// against a Python scalar, a NumPy scalar, a complex number, or another
// Tensor. Follows the same shape as the other comparison patches: normalize
// the RHS to a Tensor of the LHS dtype, then dispatch to
// greater_equal_ad_func. Returns a new PyObject holding the result tensor.
static PyObject* tensor__ge__method(TensorObject* self,
                                    PyObject* args,
                                    PyObject* kwargs) {
  paddle::platform::RecordEvent pythonc_record_event(
      "__ge__ pybind_patch_func",
      paddle::platform::TracerEventType::UserDefined,
      1);
  EAGER_TRY
  VLOG(1) << "Running Eager tensor__ge__method";

  // Bind execution to the expected device before any kernel launch.
  auto place = egr::Controller::Instance().GetExpectedPlace();
  SetDevice(place);

  paddle::experimental::Tensor ret;
  paddle::experimental::Tensor self_tensor = self->tensor;
  PyObject* other_obj = PyTuple_GET_ITEM(args, 0);

  // Step 1: detect a plain numeric RHS (float / int / NumPy scalar).
  // There is no dedicated scalar kernel for __ge__, so the value is only
  // remembered here and materialized as a full tensor in step 2.
  float rhs_scalar = 0.0;
  bool rhs_is_numeric = false;
  if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) ||
      IsNumpyType(other_obj)) {
    if (PyFloat_Check(other_obj)) {
      rhs_scalar = CastPyArg2AttrFloat(other_obj, 0);
      rhs_is_numeric = true;
      // Comparing an integer tensor with a Python float: promote the LHS to
      // float32 so the comparison is done in floating point.
      if (_supported_int_dtype_.find(self_tensor.dtype()) !=
          _supported_int_dtype_.end()) {
        eager_gil_scoped_release guard;
        self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32);
      }
    } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) {
      rhs_scalar = static_cast<float>(CastPyArg2AttrInt(other_obj, 0));
      rhs_is_numeric = true;
    }
  }

  // Step 2: turn the RHS into a Tensor.
  paddle::experimental::Tensor other_tensor;
  if (rhs_is_numeric) {
    // Broadcast the scalar to the LHS shape/dtype.
    eager_gil_scoped_release guard;
    other_tensor = full_ad_func(self_tensor.shape(),
                                phi::Scalar(rhs_scalar),
                                self_tensor.dtype(),
                                place);
  } else if (!PyCheckTensor(other_obj)) {
    // Non-tensor, non-numeric RHS: go through the generic Scalar conversion.
    paddle::experimental::Scalar value =
        CastPyArg2Scalar(other_obj, "__ge__", 0);
    if (PyComplex_Check(other_obj)) {
      eager_gil_scoped_release guard;
      other_tensor = full_ad_func({1}, value, DataType::COMPLEX64, place);
    } else {
      eager_gil_scoped_release guard;
      other_tensor =
          full_ad_func(self_tensor.shape(), value, self_tensor.dtype(), place);
    }
  } else {
    // Already a Tensor — use it as-is.
    other_tensor = CastPyArg2Tensor(other_obj, 0);
  }

  // Step 3: unify dtypes — the RHS is converted to the LHS dtype.
  phi::DataType lhs_dtype = self_tensor.dtype();
  phi::DataType rhs_dtype = other_tensor.dtype();
  if (lhs_dtype != rhs_dtype) {
    LOG(WARNING) << "The dtype of left and right Tensor are not the same, left "
                    "dtype is "
                 << lhs_dtype << ", but right dtype is " << rhs_dtype
                 << ", the right dtype will convert to " << lhs_dtype;
    eager_gil_scoped_release guard;
    other_tensor = cast_ad_func(other_tensor, lhs_dtype);
  }

  // Step 4: run the comparison kernel (axis = -1).
  VLOG(6) << "Calling greater_equal_ad_func in tensor__ge__method";
  {
    eager_gil_scoped_release guard;
    ret = greater_equal_ad_func(self_tensor, other_tensor, -1);
  }

  return ToPyObject(ret);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyMethodDef
math_op_patch_methods
[]
=
{
PyMethodDef
math_op_patch_methods
[]
=
{
{
"__add__"
,
{
"__add__"
,
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__add__method
,
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__add__method
,
...
@@ -804,6 +974,14 @@ PyMethodDef math_op_patch_methods[] = {
...
@@ -804,6 +974,14 @@ PyMethodDef math_op_patch_methods[] = {
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__rdiv__method
,
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__rdiv__method
,
METH_VARARGS
|
METH_KEYWORDS
,
METH_VARARGS
|
METH_KEYWORDS
,
NULL
},
NULL
},
{
"__gt__"
,
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__gt__method
,
METH_VARARGS
|
METH_KEYWORDS
,
NULL
},
{
"__ge__"
,
(
PyCFunction
)(
void
(
*
)(
void
))
tensor__ge__method
,
METH_VARARGS
|
METH_KEYWORDS
,
NULL
},
{
NULL
,
NULL
,
0
,
NULL
}};
{
NULL
,
NULL
,
0
,
NULL
}};
}
// namespace pybind
}
// namespace pybind
...
...
python/paddle/fluid/dygraph/math_op_patch.py
浏览文件 @
1ecc39b4
...
@@ -402,10 +402,6 @@ def monkey_patch_math_varbase():
...
@@ -402,10 +402,6 @@ def monkey_patch_math_varbase():
(
'__ne__'
,
_binary_creator_
(
'__ne__'
,
'not_equal'
,
False
,
None
,
True
)),
(
'__ne__'
,
_binary_creator_
(
'__ne__'
,
'not_equal'
,
False
,
None
,
True
)),
(
'__lt__'
,
_binary_creator_
(
'__lt__'
,
'less_than'
,
False
,
None
,
True
)),
(
'__lt__'
,
_binary_creator_
(
'__lt__'
,
'less_than'
,
False
,
None
,
True
)),
(
'__le__'
,
_binary_creator_
(
'__le__'
,
'less_equal'
,
False
,
None
,
True
)),
(
'__le__'
,
_binary_creator_
(
'__le__'
,
'less_equal'
,
False
,
None
,
True
)),
(
'__gt__'
,
_binary_creator_
(
'__gt__'
,
'greater_than'
,
False
,
None
,
True
)),
(
'__ge__'
,
_binary_creator_
(
'__ge__'
,
'greater_equal'
,
False
,
None
,
True
)),
(
'__array_ufunc__'
,
None
)
(
'__array_ufunc__'
,
None
)
]
]
...
@@ -420,6 +416,8 @@ def monkey_patch_math_varbase():
...
@@ -420,6 +416,8 @@ def monkey_patch_math_varbase():
'__truediv__'
,
'__truediv__'
,
'__rdiv__'
,
'__rdiv__'
,
'__rtruediv__'
,
'__rtruediv__'
,
'__gt__'
,
'__ge__'
,
]
]
global
_already_patch_varbase
global
_already_patch_varbase
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录