Commit 69e82d83 (unverified)
Authored by ronnywang on Jun 29, 2022; committed via GitHub on Jun 29, 2022.

cherry pick 43890 (#43892)

* cherry pick 43890

Parent: dc12605d
Showing 1 changed file with 830 additions and 603 deletions.

paddle/fluid/pybind/pybind.cc (+830, -603)
@@ -372,7 +372,8 @@ static T PyObjectCast(PyObject *obj) {
   } catch (py::cast_error &) {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "Python object is not type of %s, the real type is %s",
-        typeid(T).name(), obj->ob_type->tp_name));
+        typeid(T).name(),
+        obj->ob_type->tp_name));
   }
 }
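Most hunks in this diff are a mechanical reflow to one-argument-per-line formatting; the underlying pattern is unchanged. For readers following along, here is a minimal self-contained sketch of that pattern — translating a failed pybind11 cast into a descriptive typed error — using only standard pybind11 and C++ facilities (CastOrThrow is an illustrative name, not Paddle's):

#include <pybind11/pybind11.h>
#include <stdexcept>
#include <string>
#include <typeinfo>

namespace py = pybind11;

template <typename T>
static T CastOrThrow(py::handle obj) {
  try {
    return obj.cast<T>();  // may throw py::cast_error
  } catch (const py::cast_error &) {
    // Report both the expected C++ type and the actual Python type.
    throw std::invalid_argument(std::string("expected ") + typeid(T).name() +
                                ", got " +
                                std::string(py::str(obj.get_type())));
  }
}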
@@ -431,7 +432,8 @@ static std::vector<std::string> inline GetNameList(
 }
 
 static void inline CreateVariableIfNotExit(
-    const py::handle &py_handle, const framework::Scope &scope,
+    const py::handle &py_handle,
+    const framework::Scope &scope,
     const framework::Executor *exe = nullptr) {
   std::vector<std::string> vec_res;
@@ -469,7 +471,8 @@ static void inline CreateVariableIfNotExit(
       PyObject *py_var_desc =
           PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kVarDescField);
       PADDLE_ENFORCE_NOT_NULL(
-          py_var_desc, platform::errors::InvalidArgument(
-                           "The var_desc of parameter to set is None"));
+          py_var_desc,
+          platform::errors::InvalidArgument(
+              "The var_desc of parameter to set is None"));
       auto var_desc = PyObjectCast<framework::VarDesc>(py_var_desc);
       Py_DECREF(py_var_desc);
@@ -505,7 +508,8 @@ static void AssertStaticGraphAndDygraphGradMakerNoDiff() {
       }
     }
   }
-  PADDLE_ENFORCE_EQ(ops.empty(), true,
+  PADDLE_ENFORCE_EQ(ops.empty(),
+                    true,
                     platform::errors::Unimplemented(
                         "OperatorWithKernel [%s] have only static graph grad "
                         "maker or have only dygraph grad maker, which is not "
@@ -527,8 +531,10 @@ static int GetNCCLVersion() {
 #endif
 
 template <typename PlaceType>
-static void TensorCopyFrom(framework::Tensor *dst, const framework::Tensor &src,
-                           const PlaceType &place, int64_t batch_size) {
+static void TensorCopyFrom(framework::Tensor *dst,
+                           const framework::Tensor &src,
+                           const PlaceType &place,
+                           int64_t batch_size) {
   if (batch_size < 0) {
     framework::TensorCopy(src, place, dst);
   } else {
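TensorCopyFrom above is a single function template instantiated once per place type and registered repeatedly under one Python name (see the _copy_from bindings later in this diff). A minimal sketch of that binding pattern, with illustrative stand-in types (Tensor, CPUPlace, GPUPlace are not Paddle's):

#include <pybind11/pybind11.h>
#include <cstdint>

namespace py = pybind11;

struct Tensor { int value = 0; };
struct CPUPlace {};
struct GPUPlace {};

template <typename PlaceType>
void CopyFrom(Tensor *dst, const Tensor &src, const PlaceType &,
              int64_t batch_size) {
  dst->value = src.value;  // a real implementation would issue a device copy
}

PYBIND11_MODULE(example, m) {
  py::class_<Tensor>(m, "Tensor").def(py::init<>());
  py::class_<CPUPlace>(m, "CPUPlace").def(py::init<>());
  py::class_<GPUPlace>(m, "GPUPlace").def(py::init<>());
  // Same Python name; pybind11 picks the overload whose `place` type matches.
  m.def("copy_from", &CopyFrom<CPUPlace>, py::arg("dst"), py::arg("src"),
        py::arg("place"), py::arg("batch_size") = -1);
  m.def("copy_from", &CopyFrom<GPUPlace>, py::arg("dst"), py::arg("src"),
        py::arg("place"), py::arg("batch_size") = -1);
}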
@@ -612,7 +618,8 @@ PYBIND11_MODULE(core_noavx, m) {
         PyCapsule_GetPointer(dltensor->ptr(), "dltensor"));
     PADDLE_ENFORCE_NOT_NULL(
-        dmt, platform::errors::InvalidArgument(
-                 "from_dlpack received an invalid capsule. "
-                 "Note that a DLPack tensor can be consumed only once."));
+        dmt,
+        platform::errors::InvalidArgument(
+            "from_dlpack received an invalid capsule. "
+            "Note that a DLPack tensor can be consumed only once."));
@@ -632,7 +639,8 @@ PYBIND11_MODULE(core_noavx, m) {
       });
 
   m.def("_create_loaded_parameter",
-        [](const py::handle &vec_var_list, const Scope &scope,
+        [](const py::handle &vec_var_list,
+           const Scope &scope,
            const Executor *executor) {
           CreateVariableIfNotExit(vec_var_list, scope, executor);
         });
@@ -670,8 +678,9 @@ PYBIND11_MODULE(core_noavx, m) {
            << ", sci_mode=" << print_opt.sci_mode;
       });
 
-  m.def("broadcast_shape", [](const std::vector<int64_t> &x_dim,
-                              const std::vector<int64_t> &y_dim) {
+  m.def("broadcast_shape",
+        [](const std::vector<int64_t> &x_dim,
+           const std::vector<int64_t> &y_dim) {
     return phi::vectorize(operators::details::BroadcastTwoDims(
         phi::make_ddim(x_dim), phi::make_ddim(y_dim), -1));
   });
@@ -685,7 +694,8 @@ PYBIND11_MODULE(core_noavx, m) {
   m.def("_get_use_default_grad_op_desc_maker_ops",
         [] { return OpInfoMap::Instance().GetUseDefaultGradOpDescMakerOps(); });
 
-  m.def("_get_all_register_op_kernels",
+  m.def(
+      "_get_all_register_op_kernels",
       [](const std::string &lib) {
         std::unordered_map<std::string, std::vector<std::string>>
             all_kernels_info;
@@ -712,8 +722,7 @@ PYBIND11_MODULE(core_noavx, m) {
           for (auto &info_pair : kernel_pair.second) {
             framework::OpKernelType kernel_type =
                 framework::TransPhiKernelKeyToOpKernelType(info_pair.first);
-            auto kernel_type_str =
-                framework::KernelTypeToString(kernel_type);
+            auto kernel_type_str = framework::KernelTypeToString(kernel_type);
             if (all_kernels_info.count(op_type)) {
               if (std::find(all_kernels_info[op_type].begin(),
                             all_kernels_info[op_type].end(),
@@ -796,14 +805,22 @@ PYBIND11_MODULE(core_noavx, m) {
             self.EmplaceBackOutput(std::move(CastPyArg2Tensor(obj, 1)));
           }
         })
-      .def("add_attr", [](paddle::CustomOpKernelContext &self, bool attr) {
-        self.EmplaceBackAttr(attr);
-      })
-      .def("add_attr", [](paddle::CustomOpKernelContext &self, int attr) {
-        self.EmplaceBackAttr(attr);
-      })
-      .def("add_attr", [](paddle::CustomOpKernelContext &self, float attr) {
-        self.EmplaceBackAttr(attr);
-      })
-      .def("add_attr", [](paddle::CustomOpKernelContext &self, int64_t attr) {
-        self.EmplaceBackAttr(attr);
-      })
+      .def("add_attr",
+           [](paddle::CustomOpKernelContext &self, bool attr) {
+             self.EmplaceBackAttr(attr);
+           })
+      .def("add_attr",
+           [](paddle::CustomOpKernelContext &self, int attr) {
+             self.EmplaceBackAttr(attr);
+           })
+      .def("add_attr",
+           [](paddle::CustomOpKernelContext &self, float attr) {
+             self.EmplaceBackAttr(attr);
+           })
+      .def("add_attr",
+           [](paddle::CustomOpKernelContext &self, int64_t attr) {
+             self.EmplaceBackAttr(attr);
+           })
       .def("add_attr",
            [](paddle::CustomOpKernelContext &self, const std::string &attr) {
              self.EmplaceBackAttr(attr);
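The repeated add_attr registrations above rely on pybind11 overload dispatch: overloads are tried in registration order, and the first one whose parameter types accept the Python arguments wins, so ordering matters where types overlap (a Python bool, for instance, is also an int). A minimal sketch of the mechanism with stand-in names (Context is not Paddle's type):

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <string>
#include <variant>
#include <vector>

namespace py = pybind11;

struct Context {
  std::vector<std::variant<bool, int, float, std::string>> attrs;
  template <typename T>
  void EmplaceBackAttr(T v) { attrs.emplace_back(std::move(v)); }
};

PYBIND11_MODULE(example, m) {
  py::class_<Context>(m, "Context")
      .def(py::init<>())
      // One Python method name, several C++ parameter types.
      .def("add_attr", [](Context &self, bool v) { self.EmplaceBackAttr(v); })
      .def("add_attr", [](Context &self, int v) { self.EmplaceBackAttr(v); })
      .def("add_attr", [](Context &self, float v) { self.EmplaceBackAttr(v); })
      .def("add_attr",
           [](Context &self, const std::string &v) { self.EmplaceBackAttr(v); });
}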
@@ -817,13 +834,14 @@ PYBIND11_MODULE(core_noavx, m) {
       .def("add_attr",
            [](paddle::CustomOpKernelContext &self,
               const std::vector<int64_t> &attr) {
              self.EmplaceBackAttr(attr);
            })
-      .def("add_attr", [](paddle::CustomOpKernelContext &self,
+      .def("add_attr",
+           [](paddle::CustomOpKernelContext &self,
               const std::vector<std::string> &attr) {
              self.EmplaceBackAttr(attr);
            });
 
-  py::class_<framework::Tensor> framework_tensor(m, "Tensor",
-                                                 py::buffer_protocol());
+  py::class_<framework::Tensor> framework_tensor(
+      m, "Tensor", py::buffer_protocol());
   g_framework_tensor_pytype =
       reinterpret_cast<PyTypeObject *>(framework_tensor.ptr());
   framework_tensor
@@ -898,70 +916,118 @@ PYBIND11_MODULE(core_noavx, m) {
             self.mutable_data<float>(place);
           })
       .def("_mutable_data",
-           [](framework::Tensor &self, paddle::platform::CPUPlace &place,
+           [](framework::Tensor &self,
+              paddle::platform::CPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
       .def("_mutable_data",
-           [](framework::Tensor &self, paddle::platform::XPUPlace &place,
+           [](framework::Tensor &self,
+              paddle::platform::XPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
       .def("_mutable_data",
-           [](framework::Tensor &self, paddle::platform::CUDAPlace &place,
+           [](framework::Tensor &self,
+              paddle::platform::CUDAPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
       .def("_mutable_data",
-           [](framework::Tensor &self, paddle::platform::CUDAPinnedPlace &place,
+           [](framework::Tensor &self,
+              paddle::platform::CUDAPinnedPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
       .def("_mutable_data",
-           [](framework::Tensor &self, paddle::platform::MLUPlace &place,
+           [](framework::Tensor &self,
+              paddle::platform::MLUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
       .def("_clear", &framework::Tensor::clear)
       .def("_mutable_data",
-           [](framework::Tensor &self, paddle::platform::NPUPlace &place,
+           [](framework::Tensor &self,
+              paddle::platform::NPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
-      .def("_copy_from", &TensorCopyFrom<paddle::platform::CPUPlace>,
-           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
-      .def("_copy_from", &TensorCopyFrom<paddle::platform::XPUPlace>,
-           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
-      .def("_copy_from", &TensorCopyFrom<paddle::platform::CUDAPlace>,
-           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
-      .def("_copy_from", &TensorCopyFrom<paddle::platform::NPUPlace>,
-           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
-      .def("_copy_from", &TensorCopyFrom<paddle::platform::CUDAPinnedPlace>,
-           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
-      .def("_copy_from", &TensorCopyFrom<paddle::platform::MLUPlace>,
-           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
-      .def("_copy_from", &TensorCopyFrom<paddle::platform::Place>,
-           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
-      .def("set", SetTensorFromPyArray<paddle::platform::CPUPlace>,
-           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
-      .def("set", SetTensorFromPyArray<paddle::platform::XPUPlace>,
-           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
-      .def("set", SetTensorFromPyArray<paddle::platform::CUDAPlace>,
-           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
-      .def("set", SetTensorFromPyArray<paddle::platform::NPUPlace>,
-           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
-      .def("set", SetTensorFromPyArray<paddle::platform::IPUPlace>,
-           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
-      .def("set", SetTensorFromPyArray<paddle::platform::MLUPlace>,
-           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
-      .def("set", SetTensorFromPyArray<paddle::platform::CUDAPinnedPlace>,
-           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false,
+      .def("_copy_from",
+           &TensorCopyFrom<paddle::platform::CPUPlace>,
+           py::arg("tensor"),
+           py::arg("place"),
+           py::arg("batch_size") = -1)
+      .def("_copy_from",
+           &TensorCopyFrom<paddle::platform::XPUPlace>,
+           py::arg("tensor"),
+           py::arg("place"),
+           py::arg("batch_size") = -1)
+      .def("_copy_from",
+           &TensorCopyFrom<paddle::platform::CUDAPlace>,
+           py::arg("tensor"),
+           py::arg("place"),
+           py::arg("batch_size") = -1)
+      .def("_copy_from",
+           &TensorCopyFrom<paddle::platform::NPUPlace>,
+           py::arg("tensor"),
+           py::arg("place"),
+           py::arg("batch_size") = -1)
+      .def("_copy_from",
+           &TensorCopyFrom<paddle::platform::CUDAPinnedPlace>,
+           py::arg("tensor"),
+           py::arg("place"),
+           py::arg("batch_size") = -1)
+      .def("_copy_from",
+           &TensorCopyFrom<paddle::platform::MLUPlace>,
+           py::arg("tensor"),
+           py::arg("place"),
+           py::arg("batch_size") = -1)
+      .def("_copy_from",
+           &TensorCopyFrom<paddle::platform::Place>,
+           py::arg("tensor"),
+           py::arg("place"),
+           py::arg("batch_size") = -1)
+      .def("set",
+           SetTensorFromPyArray<paddle::platform::CPUPlace>,
+           py::arg("array"),
+           py::arg("place"),
+           py::arg("zero_copy") = false)
+      .def("set",
+           SetTensorFromPyArray<paddle::platform::XPUPlace>,
+           py::arg("array"),
+           py::arg("place"),
+           py::arg("zero_copy") = false)
+      .def("set",
+           SetTensorFromPyArray<paddle::platform::CUDAPlace>,
+           py::arg("array"),
+           py::arg("place"),
+           py::arg("zero_copy") = false)
+      .def("set",
+           SetTensorFromPyArray<paddle::platform::NPUPlace>,
+           py::arg("array"),
+           py::arg("place"),
+           py::arg("zero_copy") = false)
+      .def("set",
+           SetTensorFromPyArray<paddle::platform::IPUPlace>,
+           py::arg("array"),
+           py::arg("place"),
+           py::arg("zero_copy") = false)
+      .def("set",
+           SetTensorFromPyArray<paddle::platform::MLUPlace>,
+           py::arg("array"),
+           py::arg("place"),
+           py::arg("zero_copy") = false)
+      .def("set",
+           SetTensorFromPyArray<paddle::platform::CUDAPinnedPlace>,
+           py::arg("array"),
+           py::arg("place"),
+           py::arg("zero_copy") = false,
            R"DOC(
         Set the data of Tensor on place with given numpy array.
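The py::arg chains above both name the parameters (enabling keyword calls from Python) and attach Python-side defaults. A minimal sketch of the same idiom with illustrative stand-ins (Tensor/SetFromArray are not Paddle's symbols):

#include <pybind11/pybind11.h>

namespace py = pybind11;

struct Tensor { int n = 0; };

void SetFromArray(Tensor &self, int array, bool zero_copy) {
  // a real implementation would share or copy the buffer based on zero_copy
  self.n = array;
}

PYBIND11_MODULE(example, m) {
  py::class_<Tensor>(m, "Tensor")
      .def(py::init<>())
      .def("set", &SetFromArray, py::arg("array"),
           py::arg("zero_copy") = false,  // Python default: t.set(5) works
           R"DOC(Set the data of the Tensor from `array`.)DOC");
}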
@@ -985,7 +1051,8 @@ PYBIND11_MODULE(core_noavx, m) {
                 t.set(np.ndarray([5, 30]), fluid.CPUPlace())
           )DOC")
-      .def("shape", [](framework::Tensor &self) { return vectorize(self.dims()); },
+      .def("shape",
+           [](framework::Tensor &self) { return vectorize(self.dims()); },
           R"DOC(
            Return the shape of Tensor.
@@ -1046,9 +1113,9 @@ PYBIND11_MODULE(core_noavx, m) {
             ostr << self;
             return ostr.str();
           })
       /* ------ End of original Tensor ------ */
       .def("__init__",
-           [](framework::Tensor &instance, const std::vector<std::vector<size_t>>
+           [](framework::Tensor &instance,
+              const std::vector<std::vector<size_t>>
                   &recursive_sequence_lengths) {
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
@@ -1057,7 +1124,8 @@ PYBIND11_MODULE(core_noavx, m) {
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE_EQ(
-                CheckLoD(new_offset_lod, -1), true,
+                CheckLoD(new_offset_lod, -1),
+                true,
                 platform::errors::InvalidArgument(
                     "The provided recursive_sequence_lengths info is "
                     "invalid, "
@@ -1075,7 +1143,8 @@ PYBIND11_MODULE(core_noavx, m) {
       // avoid misuse.
       // The discussion is here:
       // https://github.com/PaddlePaddle/Paddle/issues/10855
-      .def("set_lod",
+      .def(
+          "set_lod",
           [](framework::Tensor &self,
              const std::vector<std::vector<size_t>> &lod) {
             // the input lod is offset-based level-of-detail info
@@ -1083,12 +1152,14 @@ PYBIND11_MODULE(core_noavx, m) {
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             PADDLE_ENFORCE_EQ(
-                CheckLoD(new_lod, vectorize(self.dims()).front()), true,
+                CheckLoD(new_lod, vectorize(self.dims()).front()),
+                true,
                 platform::errors::InvalidArgument(
                     "The provided LoD is invalid, the LoD is %s", new_lod));
             self.set_lod(new_lod);
           },
-          py::arg("lod"), R"DOC(
+          py::arg("lod"),
+          R"DOC(
            Set LoD of the Tensor.
 
            Args:
@@ -1108,8 +1179,10 @@ PYBIND11_MODULE(core_noavx, m) {
                 t.set_lod([[0, 2, 5]])
                 print(t.lod()) # [[0, 2, 5]]
           )DOC")
-      .def("set_recursive_sequence_lengths",
-           [](framework::Tensor &self, const std::vector<std::vector<size_t>>
+      .def(
+          "set_recursive_sequence_lengths",
+          [](framework::Tensor &self,
+             const std::vector<std::vector<size_t>>
                  &recursive_sequence_lengths) {
             // the input recursive_sequence_lengths is length-based
             // level-of-detail info
@@ -1120,7 +1193,8 @@ PYBIND11_MODULE(core_noavx, m) {
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE_EQ(
-                CheckLoD(new_offset_lod, vectorize(self.dims()).front()), true,
+                CheckLoD(new_offset_lod, vectorize(self.dims()).front()),
+                true,
                 platform::errors::InvalidArgument(
                     "The provided recursive_sequence_lengths info is "
                     "invalid, "
@@ -1129,7 +1203,8 @@ PYBIND11_MODULE(core_noavx, m) {
                     new_lod));
             self.set_lod(new_offset_lod);
           },
-          py::arg("recursive_sequence_lengths"), R"DOC(
+          py::arg("recursive_sequence_lengths"),
+          R"DOC(
            Set LoD of the Tensor according to recursive sequence lengths.
 
            For example, if recursive_sequence_lengths=[[2, 3]], which means
@@ -1154,7 +1229,8 @@ PYBIND11_MODULE(core_noavx, m) {
                 print(t.recursive_sequence_lengths()) # [[2, 3]]
                 print(t.lod()) # [[0, 2, 5]]
           )DOC")
-      .def("lod",
+      .def(
+          "lod",
           [](framework::Tensor &self) -> std::vector<std::vector<size_t>> {
             // output the offset-based lod info
             LoD lod = self.lod();
@@ -1181,7 +1257,8 @@ PYBIND11_MODULE(core_noavx, m) {
                 print(t.lod()) # [[0, 2, 5]]
           )DOC")
       // Set above comments of set_lod.
-      .def("recursive_sequence_lengths",
+      .def(
+          "recursive_sequence_lengths",
           [](framework::Tensor &self) -> std::vector<std::vector<size_t>> {
             // output the length-based lod info
             LoD lod = phi::ConvertToLengthBasedLoD(self.lod());
@@ -1208,7 +1285,8 @@ PYBIND11_MODULE(core_noavx, m) {
                 t.set_recursive_sequence_lengths([[2, 3]])
                 print(t.recursive_sequence_lengths()) # [[2, 3]]
           )DOC")
-      .def("has_valid_recursive_sequence_lengths",
+      .def(
+          "has_valid_recursive_sequence_lengths",
           [](framework::Tensor &self) -> bool {
             // Check that the lod info is valid and match the outermost
             // dimension of the Tensor data
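The LoD bindings above convert between length-based and offset-based representations: per the docstrings, a length-based level [2, 3] corresponds to the offset-based level [0, 2, 5]. That conversion is a running prefix sum; a minimal illustrative helper (not Paddle's ConvertToOffsetBasedLoD itself):

#include <cstddef>
#include <vector>

std::vector<size_t> LengthsToOffsets(const std::vector<size_t> &lengths) {
  std::vector<size_t> offsets(1, 0);  // offsets always start at 0
  for (size_t len : lengths) offsets.push_back(offsets.back() + len);
  return offsets;
}
// LengthsToOffsets({2, 3}) == {0, 2, 5}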
@@ -1594,11 +1672,13 @@ PYBIND11_MODULE(core_noavx, m) {
             new (&instance) phi::SelectedRows();
           })
       .def("__init__",
-           [](phi::SelectedRows &instance, const std::vector<int64_t> rows,
+           [](phi::SelectedRows &instance,
+              const std::vector<int64_t> rows,
              const int64_t &height) {
             new (&instance) phi::SelectedRows(rows, height);
           })
-      .def("get_tensor",
+      .def(
+          "get_tensor",
           [](phi::SelectedRows &self) { return self.mutable_value(); },
           py::return_value_policy::reference)
       .def("numel",
@@ -1642,7 +1722,8 @@ All parameter, weight, gradient are variables in Paddle.
       })
       .def("get_float",
           [](const Variable &var) -> float { return var.Get<float>(); })
-      .def("get_tensor",
+      .def(
+          "get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
@@ -1655,50 +1736,61 @@ All parameter, weight, gradient are variables in Paddle.
            [](Variable &self, Strings str_list) {
              *self.GetMutable<Strings>() = str_list;
            })
-      .def("set_vocab", [](Variable &self, Vocab vocab) {
-        *self.GetMutable<Vocab>() = vocab;
-      })
-      .def("get_string_tensor",
+      .def("set_vocab",
+           [](Variable &self, Vocab vocab) {
+             *self.GetMutable<Vocab>() = vocab;
+           })
+      .def(
+          "get_string_tensor",
           [](Variable &self) { return self.GetMutable<Strings>(); },
           py::return_value_policy::reference)
-      .def("get_map_tensor",
+      .def(
+          "get_map_tensor",
           [](Variable &self) { return self.GetMutable<Vocab>(); },
           py::return_value_policy::reference)
-      .def("get_lod_rank_table",
+      .def(
+          "get_lod_rank_table",
           [](Variable &self) { return self.GetMutable<LoDRankTable>(); },
           py::return_value_policy::reference)
-      .def("get_selected_rows",
+      .def(
+          "get_selected_rows",
           [](Variable &self) -> phi::SelectedRows * {
             return self.GetMutable<phi::SelectedRows>();
           },
           py::return_value_policy::reference)
-      .def("get_lod_tensor_array",
+      .def(
+          "get_lod_tensor_array",
           [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
           py::return_value_policy::reference)
-      .def("get_fetch_list",
+      .def(
+          "get_fetch_list",
           [](Variable &self) { return self.GetMutable<FetchList>(); },
           py::return_value_policy::reference)
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
-      .def("get_communicator",
+      .def(
+          "get_communicator",
           [](Variable &self) -> platform::Communicator * {
             return self.GetMutable<platform::Communicator>();
           },
           py::return_value_policy::reference)
 #endif
-      .def("get_reader",
+      .def(
+          "get_reader",
           [](Variable &self) -> framework::ReaderHolder * {
-            PADDLE_ENFORCE_EQ(self.IsType<framework::ReaderHolder>(), true,
+            PADDLE_ENFORCE_EQ(self.IsType<framework::ReaderHolder>(),
+                              true,
                               platform::errors::InvalidArgument(
                                   "The variable is not type of ReaderHolder."));
             return self.GetMutable<framework::ReaderHolder>();
           },
           py::return_value_policy::reference)
-      .def("get_scope",
+      .def(
+          "get_scope",
           [](Variable &self) -> Scope * {
-            auto scope_vec = self.GetMutable<std::vector<framework::Scope *>>();
-            PADDLE_ENFORCE_GT(scope_vec->size(), 0,
+            auto scope_vec =
+                self.GetMutable<std::vector<framework::Scope *>>();
+            PADDLE_ENFORCE_GT(scope_vec->size(),
+                              0,
                               platform::errors::InvalidArgument(
                                   "The size of scope_vec should be greater than 0"));
             return scope_vec->front();
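Every get_* accessor above is tagged py::return_value_policy::reference because the returned pointer is owned by the enclosing Variable; Python must be able to use it but never delete it. A minimal sketch of that ownership contract with stand-in types (not Paddle's):

#include <pybind11/pybind11.h>

namespace py = pybind11;

struct LoDTensor { int data = 0; };
struct Variable {
  LoDTensor tensor;
  LoDTensor *GetMutable() { return &tensor; }  // Variable keeps ownership
};

PYBIND11_MODULE(example, m) {
  py::class_<LoDTensor>(m, "LoDTensor").def(py::init<>());
  py::class_<Variable>(m, "Variable")
      .def(py::init<>())
      .def("get_tensor",
           [](Variable &self) { return self.GetMutable(); },
           py::return_value_policy::reference);  // no ownership transfer
}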
@@ -1736,7 +1828,8 @@ All parameter, weight, gradient are variables in Paddle.
   _Scope
       .def("_remove_from_pool",
           [](Scope &self) { ScopePool::Instance().Remove(&self); })
-      .def("var",
+      .def(
+          "var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.Var(name);
           },
@@ -1755,7 +1848,9 @@ All parameter, weight, gradient are variables in Paddle.
            out (core.Variable): the found or created variable.
            )DOC",
           py::return_value_policy::reference)
-      .def("find_var", &Scope::FindVar, py::arg("name"),
+      .def("find_var",
+           &Scope::FindVar,
+           py::arg("name"),
           R"DOC(
            Find variable named :code:`name` in the current scope or
            its parent scope. Return None if not found. 
@@ -1768,7 +1863,9 @@ All parameter, weight, gradient are variables in Paddle.
            )DOC",
           py::return_value_policy::reference)
       .def("size", &Scope::Size)
-      .def("erase", &Scope::EraseVars, py::arg("names"),
+      .def("erase",
+           &Scope::EraseVars,
+           py::arg("names"),
           R"DOC(
            Find variable named :code:`name` in the current scope or
            its parent scope. Return None if not found. 
@@ -1780,7 +1877,9 @@ All parameter, weight, gradient are variables in Paddle.
            None
            )DOC",
           py::return_value_policy::reference)
-      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
+      .def(
+          "new_scope",
+          [](Scope &self) -> Scope * { return &self.NewScope(); },
           R"DOC(
            Create a new sub-scope of the current scope.
@@ -1788,13 +1887,15 @@ All parameter, weight, gradient are variables in Paddle.
            out (core._Scope): the created sub-scope.
            )DOC",
           py::return_value_policy::reference)
-      .def("drop_kids", &Scope::DropKids,
+      .def("drop_kids",
+           &Scope::DropKids,
           R"DOC(
            Delete all sub-scopes of the current scope.
            )DOC")
       .def("_kids", &Scope::kids);
 
-  m.def("Scope",
+  m.def(
+      "Scope",
       []() -> Scope * {
         auto *s = new Scope();
         ScopePool::Instance().Insert(std::unique_ptr<Scope>(s));
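The Scope factory above hands Python a raw pointer while a pool retains the owning unique_ptr, which keeps the object alive independent of Python's garbage collector. A minimal sketch of that arrangement (ScopePool here is a simplified stand-in):

#include <pybind11/pybind11.h>
#include <memory>
#include <vector>

namespace py = pybind11;

struct Scope {};

struct ScopePool {
  static ScopePool &Instance() { static ScopePool pool; return pool; }
  void Insert(std::unique_ptr<Scope> s) { scopes_.push_back(std::move(s)); }
  std::vector<std::unique_ptr<Scope>> scopes_;
};

PYBIND11_MODULE(example, m) {
  py::class_<Scope>(m, "_Scope");
  m.def("Scope",
        []() -> Scope * {
          auto *s = new Scope();
          ScopePool::Instance().Insert(std::unique_ptr<Scope>(s));
          return s;  // Python sees a non-owning reference
        },
        py::return_value_policy::reference);  // the pool owns the Scope
}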
@@ -1817,7 +1918,8 @@ All parameter, weight, gradient are variables in Paddle.
       if (info.HasOpProtoAndChecker()) {
         std::string str;
-        PADDLE_ENFORCE_EQ(info.Proto().SerializeToString(&str), true,
+        PADDLE_ENFORCE_EQ(info.Proto().SerializeToString(&str),
+                          true,
                           platform::errors::Fatal(
                               "Serialize OpProto Error. This could be a bug of Paddle."));
         ret_values.emplace_back(str);
@@ -1838,18 +1940,20 @@ All parameter, weight, gradient are variables in Paddle.
         }
         return res;
       });
-  m.def("get_grad_op_desc", [](const OpDesc &op_desc,
+  m.def(
+      "get_grad_op_desc",
+      [](const OpDesc &op_desc,
          const std::unordered_set<std::string> &no_grad_set,
          const std::vector<BlockDesc *> &grad_sub_block) {
        std::unordered_map<std::string, std::string> grad_to_var;
        std::vector<std::unique_ptr<OpDesc>> grad_op_descs =
            framework::OpInfoMap::Instance()
                .Get(op_desc.Type())
-               .GradOpMaker()(op_desc, no_grad_set, &grad_to_var, grad_sub_block);
+               .GradOpMaker()(
+                   op_desc, no_grad_set, &grad_to_var, grad_sub_block);
        std::vector<OpDesc *> grad_op_desc_ptrs(grad_op_descs.size());
-       std::transform(grad_op_descs.begin(), grad_op_descs.end(),
+       std::transform(grad_op_descs.begin(),
+                      grad_op_descs.end(),
                       grad_op_desc_ptrs.begin(),
                       [](std::unique_ptr<OpDesc> &p) { return p.release(); });
        return std::make_pair(grad_op_desc_ptrs, grad_to_var);
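The std::transform over unique_ptrs above is an ownership hand-off: release() detaches each descriptor from its smart pointer so the raw pointers can cross the binding boundary. A minimal sketch of the idiom in isolation (OpDesc here is a stand-in):

#include <algorithm>
#include <memory>
#include <vector>

struct OpDesc { int id = 0; };

std::vector<OpDesc *> ReleaseAll(std::vector<std::unique_ptr<OpDesc>> &owned) {
  std::vector<OpDesc *> raw(owned.size());
  std::transform(owned.begin(), owned.end(), raw.begin(),
                 [](std::unique_ptr<OpDesc> &p) { return p.release(); });
  return raw;  // the caller is now responsible for deleting these
}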
@@ -1866,7 +1970,8 @@ All parameter, weight, gradient are variables in Paddle.
     return framework::OpInfoMap::Instance().Get(op_type).HasInferInplace();
   });
   m.def("infer_no_need_buffer_slots",
-        [](const std::string op_type, const framework::VariableNameMap &inputs,
+        [](const std::string op_type,
+           const framework::VariableNameMap &inputs,
           const framework::VariableNameMap &outputs,
           const framework::AttributeMap &attrs) {
          auto infer_func = framework::OpInfoMap::Instance()
@@ -1879,7 +1984,8 @@ All parameter, weight, gradient are variables in Paddle.
            return empty;
          }
        });
-  m.def("prune", [](const ProgramDesc &origin,
+  m.def("prune",
+        [](const ProgramDesc &origin,
           const std::set<std::string> &feeded_var_names,
           const std::vector<std::array<size_t, 2>> &targets) {
          ProgramDesc prog_with_targets(origin);
@@ -1893,7 +1999,8 @@ All parameter, weight, gradient are variables in Paddle.
     return std::make_tuple(ProgramDesc(pruned_desc),
                            pruned_origin_block_id_map);
   });
-  m.def("prune_backward",
+  m.def(
+      "prune_backward",
       [](const framework::ProgramDesc &program) {
         return PruneBackward(program);
       },
@@ -2040,12 +2147,12 @@ All parameter, weight, gradient are variables in Paddle.
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
     device_types = phi::DeviceManager::GetAllDeviceTypes();
 #else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
         "Cannot use get_all_device_type because you have installed"
         "CPU/GPU version PaddlePaddle.\n"
         "If you want to use get_all_device_type, please try to install"
         "CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
 #endif
     return device_types;
   });
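Besides correcting the suggested pip package name, this hunk and the three that follow downgrade the fallback message from LOG(WARNING) to VLOG(1): with glog-style logging, VLOG(1) is silent unless verbose logging is enabled (for example via GLOG_v=1), so CPU/GPU builds no longer warn on every call. A minimal glog sketch of the distinction:

#include <glog/logging.h>

void ReportMissingCustomDevice() {
  // LOG(WARNING) would always print; VLOG(1) prints only when the verbosity
  // level is >= 1, so this is invisible in a default configuration.
  VLOG(1) << "Cannot use get_all_device_type because you have installed "
             "CPU/GPU version PaddlePaddle.";
}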
@@ -2054,12 +2161,12 @@ All parameter, weight, gradient are variables in Paddle.
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
     device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
 #else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
        "Cannot use get_all_custom_device_type because you have installed"
        "CPU/GPU version PaddlePaddle.\n"
        "If you want to use get_all_custom_device_type, please try to "
        "install CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
 #endif
     return device_types;
   });
@@ -2068,12 +2175,12 @@ All parameter, weight, gradient are variables in Paddle.
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
     devices = phi::DeviceManager::GetAllDeviceList();
 #else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
        "Cannot use get_available_device because you have installed"
        "CPU/GPU version PaddlePaddle.\n"
        "If you want to use get_available_device, please try to install"
        "CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
 #endif
     return devices;
   });
@@ -2082,18 +2189,19 @@ All parameter, weight, gradient are variables in Paddle.
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
     devices = phi::DeviceManager::GetAllCustomDeviceList();
 #else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
        "Cannot use get_available_custom_device because you have "
        "installed"
        "CPU/GPU version PaddlePaddle.\n"
        "If you want to use get_available_custom_device, please try to "
        "install"
        "CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
 #endif
     return devices;
   });
-  py::class_<platform::CustomPlace>(m, "CustomPlace",
+  py::class_<platform::CustomPlace>(
+      m,
+      "CustomPlace",
       R"DOC(
     CustomPlace is a descriptor of a device.
     It represents a custom device on which a tensor will be allocated and a model will run.
@@ -2105,7 +2213,8 @@ All parameter, weight, gradient are variables in Paddle.
         fake_cpu_place = paddle.CustomPlace("FakeCPU", 0)
           )DOC")
       .def("__init__",
-           [](platform::CustomPlace &self, const std::string &device_type,
+           [](platform::CustomPlace &self,
+              const std::string &device_type,
              int dev_id) {
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
             if (UNLIKELY(dev_id < 0)) {
@@ -2113,7 +2222,8 @@ All parameter, weight, gradient are variables in Paddle.
                   "Invalid CustomPlace(%s, %d), device id must be 0 "
                   "or "
                   "positive integer",
-                  device_type, dev_id);
+                  device_type,
+                  dev_id);
               std::exit(-1);
             }
@@ -2134,7 +2244,11 @@ All parameter, weight, gradient are variables in Paddle.
                     "inside "
                     "[0, %d), because %s "
                     "number on your machine is %d",
-                    device_type, dev_id, dev_count, device_type, dev_count);
+                    device_type,
+                    dev_id,
+                    dev_count,
+                    device_type,
+                    dev_count);
                 std::exit(-1);
               }
             }
@@ -2144,7 +2258,8 @@ All parameter, weight, gradient are variables in Paddle.
                   "Invalid CustomPlace(%s, %d), the device type is "
                   "not registered "
                   "as a custom device.",
-                  device_type, dev_id);
+                  device_type,
+                  dev_id);
               std::exit(-1);
             }
 #else
@@ -2153,7 +2268,7 @@ All parameter, weight, gradient are variables in Paddle.
                 "version PaddlePaddle.\n"
                 "If you want to use CustomDevice, please try to install"
                 "CustomDevice version "
-                "PaddlePaddle by: pip install paddlepaddle-core\n"
+                "PaddlePaddle by: pip install paddlepaddle\n"
                 "If you only have CPU, please change "
                 "CustomPlace(%s, %d) to be CPUPlace().\n",
                 device_type, dev_id);
@@ -2215,7 +2330,8 @@ All parameter, weight, gradient are variables in Paddle.
           LOG(ERROR) << string::Sprintf(
               "Invalid CUDAPlace(%d), must inside [0, %d), because GPU "
               "number on your machine is %d",
-              dev_id, platform::GetGPUDeviceCount(),
+              dev_id,
+              platform::GetGPUDeviceCount(),
               platform::GetGPUDeviceCount());
           std::exit(-1);
         }
@@ -2281,7 +2397,8 @@ All parameter, weight, gradient are variables in Paddle.
           LOG(ERROR) << string::Sprintf(
               "Invalid XPUPlace(%d), must inside [0, %d), because XPU "
               "number on your machine is %d",
-              dev_id, platform::GetXPUDeviceCount(),
+              dev_id,
+              platform::GetXPUDeviceCount(),
               platform::GetXPUDeviceCount());
           std::exit(-1);
         }
@@ -2446,7 +2563,8 @@ All parameter, weight, gradient are variables in Paddle.
           LOG(ERROR) << string::Sprintf(
               "Invalid NPUPlace(%d), must inside [0, %d), because NPU "
               "number on your machine is %d",
-              dev_id, platform::GetNPUDeviceCount(),
+              dev_id,
+              platform::GetNPUDeviceCount(),
               platform::GetNPUDeviceCount());
           std::exit(-1);
         }
@@ -2562,7 +2680,8 @@ All parameter, weight, gradient are variables in Paddle.
           LOG(ERROR) << string::Sprintf(
               "Invalid MLUPlace(%d), must inside [0, %d), because MLU "
               "number on your machine is %d",
-              dev_id, platform::GetMLUDeviceCount(),
+              dev_id,
+              platform::GetMLUDeviceCount(),
               platform::GetMLUDeviceCount());
           std::exit(-1);
         }
@@ -2635,8 +2754,10 @@ All parameter, weight, gradient are variables in Paddle.
       .def("mlu_device_id", [](platform::Place &self) { return self.device; })
       .def("custom_device_id",
           [](platform::Place &self) { return self.device; })
-      .def("set_place", [](platform::Place &self, const platform::Place &other) {
-        self = other;
-      })
+      .def("set_place",
+           [](platform::Place &self, const platform::Place &other) {
+             self = other;
+           })
       .def("set_place",
           [](platform::Place &self, const platform::CPUPlace &cpu_place) {
             self = cpu_place;
@@ -2681,7 +2802,8 @@ All parameter, weight, gradient are variables in Paddle.
                       true,
                       platform::errors::InvalidArgument(
                           "Cannot parse user input to OpDesc"));
-    PADDLE_ENFORCE_EQ(desc.IsInitialized(), true,
+    PADDLE_ENFORCE_EQ(desc.IsInitialized(),
+                      true,
                       platform::errors::InvalidArgument(
                           "The provided OpDesc is not "
                           "initialized, the reason is: %s",
@@ -2689,37 +2811,43 @@ All parameter, weight, gradient are variables in Paddle.
             return OpRegistry::CreateOp(desc);
           })
       .def("run",
-           [](OperatorBase &self, const Scope &scope,
+           [](OperatorBase &self,
+              const Scope &scope,
              const platform::CPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
       .def("run",
-           [](OperatorBase &self, const Scope &scope,
+           [](OperatorBase &self,
+              const Scope &scope,
              const platform::XPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
       .def("run",
-           [](OperatorBase &self, const Scope &scope,
+           [](OperatorBase &self,
+              const Scope &scope,
              const platform::NPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
       .def("run",
-           [](OperatorBase &self, const Scope &scope,
+           [](OperatorBase &self,
+              const Scope &scope,
             const platform::CUDAPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
       .def("run",
-           [](OperatorBase &self, const Scope &scope,
+           [](OperatorBase &self,
+              const Scope &scope,
             const platform::CUDAPinnedPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
       .def("run",
-           [](OperatorBase &self, const Scope &scope,
+           [](OperatorBase &self,
+              const Scope &scope,
             const platform::MLUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
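Each run overload above takes pybind11::gil_scoped_release before entering the C++ executor, so other Python threads can proceed while the operator runs. A minimal sketch of the guard's behavior:

#include <pybind11/pybind11.h>
#include <chrono>
#include <thread>

namespace py = pybind11;

void LongComputation() {
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
}

PYBIND11_MODULE(example, m) {
  m.def("run", []() {
    py::gil_scoped_release release;  // other Python threads may run now
    LongComputation();               // must not touch Python objects here
  });  // the GIL is re-acquired when `release` goes out of scope
}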
@@ -2745,7 +2873,8 @@ All parameter, weight, gradient are variables in Paddle.
   py::class_<framework::TrainerBase, std::shared_ptr<framework::TrainerBase>>(
       m, "TrainerBase")
-      .def("get_worker_scope",
+      .def(
+          "get_worker_scope",
           [](TrainerBase &self, int thread_id) -> Scope * {
             return self.GetWorkerScope(thread_id);
           },
@@ -2758,13 +2887,17 @@ All parameter, weight, gradient are variables in Paddle.
   py::class_<framework::Executor>(m, "Executor")
       .def(py::init<const platform::Place &>())
       .def("close", &Executor::Close)
-      .def("run_from_dataset", &Executor::RunFromDataset,
+      .def("run_from_dataset",
+           &Executor::RunFromDataset,
           py::call_guard<py::gil_scoped_release>())
-      .def("release_trainer", &Executor::ReleaseTrainer,
+      .def("release_trainer",
+           &Executor::ReleaseTrainer,
           py::call_guard<py::gil_scoped_release>())
       .def("init_for_dataset",
-           [](Executor &self, const ProgramDesc &prog,
-              const std::string &trainer_desc, Scope *scope,
+           [](Executor &self,
+              const ProgramDesc &prog,
+              const std::string &trainer_desc,
+              Scope *scope,
              Dataset *dataset) -> std::shared_ptr<TrainerBase> {
             pybind11::gil_scoped_release release;
             return self.InitForDataset(prog, trainer_desc, scope, dataset);
@@ -2775,40 +2908,62 @@ All parameter, weight, gradient are variables in Paddle.
             self.RunFromDataset(trainer);
           })
       .def("run_prepared_ctx",
-           [](Executor &self, ExecutorPrepareContext *ctx, Scope *scope,
+           [](Executor &self,
+              ExecutorPrepareContext *ctx,
+              Scope *scope,
              std::map<std::string, const LoDTensor *> *feed_targets,
              std::map<std::string, FetchType *> *fetch_targets,
-              bool create_local_scope = true, bool create_vars = true,
+              bool create_local_scope = true,
+              bool create_vars = true,
              const std::string &feed_holder_name = "feed",
              const std::string &fetch_holder_name = "fetch") {
             pybind11::gil_scoped_release release;
-            self.RunPreparedContext(ctx, scope, feed_targets, fetch_targets,
-                                    create_local_scope, create_vars,
-                                    feed_holder_name, fetch_holder_name);
+            self.RunPreparedContext(ctx,
+                                    scope,
+                                    feed_targets,
+                                    fetch_targets,
+                                    create_local_scope,
+                                    create_vars,
+                                    feed_holder_name,
+                                    fetch_holder_name);
           })
       .def("run_prepared_ctx",
-           [](Executor &self, ExecutorPrepareContext *ctx, Scope *scope,
-              bool create_local_scope = true, bool create_vars = true,
+           [](Executor &self,
+              ExecutorPrepareContext *ctx,
+              Scope *scope,
+              bool create_local_scope = true,
+              bool create_vars = true,
              bool keep_kids = false) {
             pybind11::gil_scoped_release release;
-            self.RunPreparedContext(ctx, scope, create_local_scope, create_vars,
-                                    keep_kids);
+            self.RunPreparedContext(
+                ctx, scope, create_local_scope, create_vars, keep_kids);
           })
       .def("prepare",
-           [](Executor &self, const ProgramDesc &program, int block_id,
+           [](Executor &self,
+              const ProgramDesc &program,
+              int block_id,
              const std::vector<std::string> &skip_ref_cnt_vars =
                  std::vector<std::string>(),
              bool force_disable_gc = false) {
             pybind11::gil_scoped_release release;
-            return self.Prepare(program, block_id, skip_ref_cnt_vars,
-                                force_disable_gc);
+            return self.Prepare(
+                program, block_id, skip_ref_cnt_vars, force_disable_gc);
           })
       .def("create_variables", &Executor::CreateVariables)
-      .def("run", [](Executor &self, const ProgramDesc &prog, Scope *scope,
-                     int block_id, bool create_local_scope, bool create_vars,
+      .def("run",
+           [](Executor &self,
+              const ProgramDesc &prog,
+              Scope *scope,
+              int block_id,
+              bool create_local_scope,
+              bool create_vars,
              const std::vector<std::string> &fetch_vars) {
             pybind11::gil_scoped_release release;
-            self.Run(prog, scope, block_id, create_local_scope, create_vars,
+            self.Run(prog,
+                     scope,
+                     block_id,
+                     create_local_scope,
+                     create_vars,
                     fetch_vars);
           });
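One general pybind11 caveat worth noting on the lambdas above (not a Paddle-specific behavior): default values written in a C++ lambda signature, such as `bool create_local_scope = true`, are not introspectable by pybind11 and do not become Python-side defaults. A Python default must be declared explicitly with py::arg, as in this sketch:

#include <pybind11/pybind11.h>

namespace py = pybind11;

PYBIND11_MODULE(example, m) {
  m.def("run",
        [](int ctx, bool create_local_scope, bool keep_kids) {
          return ctx + (create_local_scope ? 1 : 0) + (keep_kids ? 1 : 0);
        },
        py::arg("ctx"),
        py::arg("create_local_scope") = true,  // the Python default lives here
        py::arg("keep_kids") = false);
}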
@@ -2821,8 +2976,10 @@ All parameter, weight, gradient are variables in Paddle.
       });
 
   py::class_<framework::StandaloneExecutor>(m, "StandaloneExecutor")
-      .def(py::init<const platform::Place &, const ProgramDesc &,
-                    const ProgramDesc &, Scope *>())
+      .def(py::init<const platform::Place &,
+                    const ProgramDesc &,
+                    const ProgramDesc &,
+                    Scope *>())
       .def("run",
           [](StandaloneExecutor &self,
              const std::unordered_map<std::string, py::array> &input_dict,
@@ -2866,11 +3023,13 @@ All parameter, weight, gradient are variables in Paddle.
             return py::cast(std::move(ret));
           })
       .def("run",
-           [](StandaloneExecutor &self, std::vector<std::string> feed_names,
+           [](StandaloneExecutor &self,
+              std::vector<std::string> feed_names,
              std::vector<std::string> fetch_names) {
             platform::RecordEvent record_event(
-                "StandaloneExecutor:run",
-                platform::TracerEventType::UserDefined, 1);
+                "StandaloneExecutor:run",
+                platform::TracerEventType::UserDefined,
+                1);
             paddle::framework::FetchList ret;
             {
               pybind11::gil_scoped_release release;
@@ -2951,20 +3110,29 @@ All parameter, weight, gradient are variables in Paddle.
   });
 
   m.def("memory_stat_get_current", memory::StatGetCurrentValue);
   m.def("memory_stat_get_peak", memory::StatGetPeakValue);
-  m.def("run_cmd",
-        [](const std::string &cmd, int time_out = -1,
+  m.def(
+      "run_cmd",
+      [](const std::string &cmd,
+         int time_out = -1,
         int sleep_inter = -1) -> const std::string {
-        return paddle::framework::shell_get_command_output(cmd, time_out,
-                                                           sleep_inter);
+        return paddle::framework::shell_get_command_output(
+            cmd, time_out, sleep_inter);
       },
-      py::arg("cmd"), py::arg("time_out") = -1, py::arg("sleep_inter") = -1);
-  m.def("shell_execute_cmd",
-        [](const std::string &cmd, int time_out = 0, int sleep_inter = 0,
+      py::arg("cmd"),
+      py::arg("time_out") = -1,
+      py::arg("sleep_inter") = -1);
+  m.def(
+      "shell_execute_cmd",
+      [](const std::string &cmd,
+         int time_out = 0,
+         int sleep_inter = 0,
         bool redirect_stderr = false) -> std::vector<std::string> {
        return paddle::framework::shell_execute_cmd(
            cmd, time_out, sleep_inter, redirect_stderr);
      },
-      py::arg("cmd"), py::arg("time_out") = 0, py::arg("sleep_inter") = 0,
+      py::arg("cmd"),
+      py::arg("time_out") = 0,
+      py::arg("sleep_inter") = 0,
       py::arg("redirect_stderr") = false);
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -2979,13 +3147,16 @@ All parameter, weight, gradient are variables in Paddle.
 #endif
   m.def("set_feed_variable",
-        static_cast<void (*)(Scope *, const LoDTensor &, const std::string &,
-                             size_t)>(&framework::SetFeedVariable));
+        static_cast<void (*)(
+            Scope *, const LoDTensor &, const std::string &, size_t)>(
+            &framework::SetFeedVariable));
   m.def("set_feed_variable",
-        static_cast<void (*)(Scope *, const Strings &, const std::string &,
-                             size_t)>(&framework::SetFeedVariable));
+        static_cast<void (*)(
+            Scope *, const Strings &, const std::string &, size_t)>(
+            &framework::SetFeedVariable));
   m.def("get_fetch_variable",
-        [](const Scope &scope, const std::string &var_name,
+        [](const Scope &scope,
+           const std::string &var_name,
           size_t index) -> py::object {
          auto &var = framework::GetFetchVariable(scope, var_name, index);
          if (data_is_lod_tensor(var)) {
@@ -3033,26 +3204,30 @@ All parameter, weight, gradient are variables in Paddle.
   pylodtensorarray
       .def("__init__",
           [](LoDTensorArray &instance) { new (&instance) LoDTensorArray(); })
-      .def("__getitem__",
+      .def(
+          "__getitem__",
           [](LoDTensorArray &self, size_t i) { return &self.at(i); },
           py::return_value_policy::reference)
       .def("__len__", [](LoDTensorArray &self) { return self.size(); })
       .def("__setitem__",
           [](LoDTensorArray &self, size_t i, const LoDTensor &t) {
-            PADDLE_ENFORCE_LT(i, self.size(),
+            PADDLE_ENFORCE_LT(i,
+                              self.size(),
                               platform::errors::InvalidArgument(
                                   "The index to set is larger than the size "
                                   "of LoDTensorArray."));
             self[i].ShareDataWith(t);
             self[i].set_lod(t.lod());
           })
-      .def("append",
+      .def(
+          "append",
           [](LoDTensorArray &self, const LoDTensor &t) {
             self.emplace_back();
             self.back().ShareDataWith(t);
             self.back().set_lod(t.lod());
           },
-          py::arg("tensor"), R"DOC(
+          py::arg("tensor"),
+          R"DOC(
             Append a LoDensor to LoDTensorArray.
 
             Args:
@@ -3072,7 +3247,8 @@ All parameter, weight, gradient are variables in Paddle.
                  t.set(np.ndarray([5, 30]), fluid.CPUPlace())
                  arr.append(t)
            )DOC")
-      .def("_move_to_list",
+      .def(
+          "_move_to_list",
           [](LoDTensorArray &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
@@ -3086,7 +3262,8 @@ All parameter, weight, gradient are variables in Paddle.
   py::class_<FetchList>(m, "FetchList", R"DOC( FetchList is a
         vector of boost::variant<LoDTensor, LoDTensorArray>.
         )DOC")
-      .def("_move_to_list",
+      .def(
+          "_move_to_list",
           [](FetchList &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
@@ -3107,7 +3284,8 @@ All parameter, weight, gradient are variables in Paddle.
           },
           py::return_value_policy::take_ownership)
-      .def("append",
+      .def(
+          "append",
           [](FetchList &self, const LoDTensor &t) {
             self.emplace_back();
             auto &lod_tensor = BOOST_GET(LoDTensor, self.back());
@@ -3116,7 +3294,8 @@ All parameter, weight, gradient are variables in Paddle.
           },
           py::arg("var"))
-      .def("append",
+      .def(
+          "append",
           [](FetchList &self, const LoDTensorArray &t) {
             self.emplace_back();
             auto &lod_tensor_array = BOOST_GET(LoDTensorArray, self.back());
@@ -3130,7 +3309,8 @@ All parameter, weight, gradient are variables in Paddle.
   py::class_<FetchUnmergedList>(m, "FetchUnmergedList", R"DOC(
         FetchUnmergedList is 2-D array of FetchType(boost::variant(LoDTensor, LoDTensorArray)).
         )DOC")
-      .def("_move_to_list",
+      .def(
+          "_move_to_list",
           [](FetchUnmergedList &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
@@ -3168,7 +3348,8 @@ All parameter, weight, gradient are variables in Paddle.
     }
     platform::EmptyCache();
   });
-  m.def("get_device_properties",
+  m.def(
+      "get_device_properties",
       [](int id) -> const gpuDeviceProp & {
         return platform::GetDeviceProperties(id);
       },
@@ -3283,16 +3464,18 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("reset_profiler", platform::ResetProfiler);
   m.def("register_pass",
         [](const std::string &pass_type, py::object callable) {
          PADDLE_ENFORCE_EQ(
-              framework::ir::PassRegistry::Instance().Has(pass_type), false,
+              framework::ir::PassRegistry::Instance().Has(pass_type),
+              false,
              platform::errors::AlreadyExists(
                  "Pass '%s' is registered more than "
                  "once. Please use another name.",
                  pass_type));
          callable.inc_ref();
-          framework::ir::PassRegistry::Instance().Insert(pass_type, [pass_type,
-                                                                     callable]() {
+          framework::ir::PassRegistry::Instance().Insert(
+              pass_type, [pass_type, callable]() {
            py::gil_scoped_acquire guard;
            std::unique_ptr<framework::ir::Pass> pass(
-                new framework::ir::GeneratePass(py::cast<std::string>(callable())));
+                new framework::ir::GeneratePass(
+                    py::cast<std::string>(callable())));
            return pass;
          });
        });
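register_pass above stores a Python callable in a C++ registry: inc_ref() keeps it alive beyond the binding call, and the stored factory re-acquires the GIL before invoking it, since pass construction may happen outside a Python thread. A minimal sketch of that lifetime-and-GIL pattern (Registry is a stand-in):

#include <pybind11/pybind11.h>
#include <functional>
#include <map>
#include <string>

namespace py = pybind11;

static std::map<std::string, std::function<std::string()>> &Registry() {
  static std::map<std::string, std::function<std::string()>> r;
  return r;
}

PYBIND11_MODULE(example, m) {
  m.def("register_pass", [](const std::string &name, py::object callable) {
    callable.inc_ref();  // keep the Python object alive in the registry
    Registry()[name] = [callable]() {
      py::gil_scoped_acquire guard;  // may be invoked from a non-Python thread
      return py::cast<std::string>(callable());
    };
  });
}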
@@ -3304,7 +3487,8 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("size_of_dtype", framework::SizeOfType);
   py::class_<paddle::platform::ProfilerResult>(m, "_ProfilerResult")
       .def(py::init<>())
-      .def("get_data", &paddle::platform::ProfilerResult::GetData,
+      .def("get_data",
+           &paddle::platform::ProfilerResult::GetData,
           py::return_value_policy::automatic_reference)
       .def("save", &paddle::platform::ProfilerResult::Save)
       .def("get_extra_info", &paddle::platform::ProfilerResult::GetExtraInfo);
@@ -3339,7 +3523,8 @@ All parameter, weight, gradient are variables in Paddle.
                     &paddle::platform::HostPythonNode::device_node_ptrs);
   py::class_<paddle::platform::Profiler>(m, "_Profiler")
-      .def("create", &paddle::platform::Profiler::Create,
+      .def("create",
+           &paddle::platform::Profiler::Create,
           py::return_value_policy::take_ownership)
       .def("is_cupti_supported", &paddle::platform::Profiler::IsCuptiSupported)
       .def("is_cnpapi_supported",
@@ -3350,7 +3535,8 @@ All parameter, weight, gradient are variables in Paddle.
             profiler->Prepare();
           })
       .def("start", &paddle::platform::Profiler::Start)
-      .def("stop",
+      .def(
+          "stop",
           [](paddle::platform::Profiler *profiler) {
             platform::DisableHostEventRecorder();
             return profiler->Stop();
@@ -3412,22 +3598,29 @@ All parameter, weight, gradient are variables in Paddle.
           [](ir::Pass &self, const std::string &name,
              const std::string &attr) {
             self.Set<std::string>(name, new std::string(attr));
           })
-      .def("set", [](ir::Pass &self, const std::string &name, bool val) {
-        self.Set<bool>(name, new bool(val));
-      })
-      .def("set", [](ir::Pass &self, const std::string &name, int val) {
-        self.Set<const int>(name, new int(val));
-      })
+      .def("set",
+           [](ir::Pass &self, const std::string &name, bool val) {
+             self.Set<bool>(name, new bool(val));
+           })
+      .def("set",
+           [](ir::Pass &self, const std::string &name, int val) {
+             self.Set<const int>(name, new int(val));
+           })
       .def("set",
-           [](ir::Pass &self, const std::string &name,
+           [](ir::Pass &self,
+              const std::string &name,
              std::vector<std::string> set) {
             self.Set(name, new std::vector<std::string>(set));
           })
       .def("set",
-           [](ir::Pass &self, const std::string &name,
+           [](ir::Pass &self,
+              const std::string &name,
              std::unordered_set<std::string> set) {
             self.Set(name, new std::unordered_set<std::string>(set));
           })
       .def("set",
-           [](ir::Pass &self, const std::string &name,
+           [](ir::Pass &self,
+              const std::string &name,
              std::unordered_set<int> set) {
             self.Set(name, new std::unordered_set<int>(set));
           })
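The Pass::set overloads above all store a heap-allocated, typed value under a string key, which the pass later reads back by the same type. A minimal stand-in for that attribute-map idea using std::any (C++17) rather than Paddle's Pass machinery:

#include <any>
#include <map>
#include <string>
#include <vector>

struct AttrMap {
  template <typename T>
  void Set(const std::string &name, T value) {
    attrs_[name] = std::move(value);  // type is remembered by std::any
  }
  template <typename T>
  const T &Get(const std::string &name) const {
    return std::any_cast<const T &>(attrs_.at(name));  // throws on type mismatch
  }
  std::map<std::string, std::any> attrs_;
};

// AttrMap a;
// a.Set("names", std::vector<std::string>{"x", "y"});
// const auto &names = a.Get<std::vector<std::string>>("names");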
@@ -3604,7 +3797,8 @@ All parameter, weight, gradient are variables in Paddle.
           },
           R"DOC(This config that the this is distributed training with parameter server
           )DOC")
-      .def_property("_dry_run",
+      .def_property(
+          "_dry_run",
           [](const ExecutionStrategy &self) { return self.dry_run_; },
           [](ExecutionStrategy &self, bool dry_run) {
             self.dry_run_ = dry_run;
@@ -3671,7 +3865,8 @@ All parameter, weight, gradient are variables in Paddle.
           "reduce_strategy",
           [](const BuildStrategy &self) { return self.reduce_; },
           [](BuildStrategy &self, BuildStrategy::ReduceStrategy strategy) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                               platform::errors::PreconditionNotMet(
                                   "BuildStrategy has been finlaized, cannot be "
                                   "configured again."));
@@ -3701,7 +3896,8 @@ All parameter, weight, gradient are variables in Paddle.
           [](const BuildStrategy &self) { return self.gradient_scale_; },
           [](BuildStrategy &self,
              BuildStrategy::GradientScaleStrategy strategy) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                               platform::errors::PreconditionNotMet(
                                   "BuildStrategy has been finlaized, cannot be "
                                   "configured again."));
@@ -3766,7 +3962,8 @@ All parameter, weight, gradient are variables in Paddle.
           "debug_graphviz_path",
           [](const BuildStrategy &self) { return self.debug_graphviz_path_; },
           [](BuildStrategy &self, const std::string &path) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                               platform::errors::PreconditionNotMet(
                                   "BuildStrategy has been finlaized, cannot be "
                                   "configured again."));
@@ -3793,7 +3990,8 @@ All parameter, weight, gradient are variables in Paddle.
             return self.enable_sequential_execution_;
           },
           [](BuildStrategy &self, bool b) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                               platform::errors::PreconditionNotMet(
                                   "BuildStrategy has been finlaized, cannot be "
                                   "configured again."));
@@ -3819,7 +4017,8 @@ All parameter, weight, gradient are variables in Paddle.
             return self.remove_unnecessary_lock_;
           },
           [](BuildStrategy &self, bool b) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                               platform::errors::PreconditionNotMet(
                                   "BuildStrategy has been finlaized, cannot be "
                                   "configured again."));
@@ -3856,7 +4055,8 @@ All parameter, weight, gradient are variables in Paddle.
              const std::vector<std::string> &trainers_endpoints) {
            self.trainers_endpoints_ = trainers_endpoints;
          })
-      .def_property("trainer_id",
+      .def_property(
+          "trainer_id",
          [](const BuildStrategy &self) { return self.trainer_id_; },
          [](BuildStrategy &self, int trainer_id) {
            self.trainer_id_ = trainer_id;
@@ -3873,14 +4073,16 @@ All parameter, weight, gradient are variables in Paddle.
          [](BuildStrategy &self, int bkcl_comm_num) {
            self.bkcl_comm_num_ = bkcl_comm_num;
          })
-      .def_property("use_hierarchical_allreduce",
+      .def_property(
+          "use_hierarchical_allreduce",
          [](const BuildStrategy &self) {
            return self.use_hierarchical_allreduce_;
          },
          [](BuildStrategy &self, bool use) {
            self.use_hierarchical_allreduce_ = use;
          })
-      .def_property("hierarchical_allreduce_inter_nranks",
+      .def_property(
+          "hierarchical_allreduce_inter_nranks",
          [](const BuildStrategy &self) {
            return self.hierarchical_allreduce_inter_nranks_;
          },
@@ -3894,7 +4096,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.fuse_elewise_add_act_ops_;
          },
          [](BuildStrategy &self, bool b) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
@@ -3919,7 +4122,8 @@ All parameter, weight, gradient are variables in Paddle.
          "fuse_gemm_epilogue",
          [](const BuildStrategy &self) { return self.fuse_gemm_epilogue_; },
          [](BuildStrategy &self, bool b) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
@@ -3944,7 +4148,8 @@ All parameter, weight, gradient are variables in Paddle.
          "fuse_bn_act_ops",
          [](const BuildStrategy &self) { return self.fuse_bn_act_ops_; },
          [](BuildStrategy &self, bool b) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
@@ -3969,7 +4174,8 @@ All parameter, weight, gradient are variables in Paddle.
          "fuse_bn_add_act_ops",
          [](const BuildStrategy &self) { return self.fuse_bn_add_act_ops_; },
          [](BuildStrategy &self, bool b) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
@@ -3994,7 +4200,8 @@ All parameter, weight, gradient are variables in Paddle.
          "enable_auto_fusion",
          [](const BuildStrategy &self) { return self.enable_auto_fusion_; },
          [](BuildStrategy &self, bool b) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
@@ -4022,7 +4229,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.fuse_relu_depthwise_conv_;
          },
          [](BuildStrategy &self, bool b) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
@@ -4045,13 +4253,15 @@ All parameter, weight, gradient are variables in Paddle.
                    build_strategy = static.BuildStrategy()
                    build_strategy.fuse_relu_depthwise_conv = True
          )DOC")
-      .def_property("fuse_broadcast_ops",
+      .def_property(
+          "fuse_broadcast_ops",
          [](const BuildStrategy &self) {
            return self.fuse_broadcast_ops_ == true ||
                   self.fuse_broadcast_ops_ == paddle::none;
          },
          [](BuildStrategy &self, bool b) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, "
                                  "cannot be configured again."));
@@ -4075,13 +4285,15 @@ All parameter, weight, gradient are variables in Paddle.
                    build_strategy = static.BuildStrategy()
                    build_strategy.fuse_broadcast_ops = True
          )DOC")
-      .def_property("fuse_all_optimizer_ops",
+      .def_property(
+          "fuse_all_optimizer_ops",
          [](const BuildStrategy &self) {
            return self.fuse_all_optimizer_ops_ == true ||
                   self.fuse_all_optimizer_ops_ == paddle::none;
          },
          [](BuildStrategy &self, bool b) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, "
                                  "cannot be configured again."));
@@ -4091,7 +4303,8 @@ All parameter, weight, gradient are variables in Paddle.
          "sync_batch_norm",
          [](const BuildStrategy &self) { return self.sync_batch_norm_; },
          [](BuildStrategy &self, bool b) {
-            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
+            PADDLE_ENFORCE_NE(self.IsFinalized(),
+                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
@@ -4169,7 +4382,8 @@ All parameter, weight, gradient are variables in Paddle.
             self.is_distribution_ = b;
 #endif
           })
-      .def_property("async_mode",
+      .def_property(
+          "async_mode",
           [](const BuildStrategy &self) { return self.async_mode_; },
           [](BuildStrategy &self, bool b) { self.async_mode_ = b; })
       .def_property(
@@ -4187,7 +4401,8 @@ All parameter, weight, gradient are variables in Paddle.
                    self.fuse_all_reduce_ops_ == paddle::none;
           },
           [](BuildStrategy &self, bool b) { self.fuse_all_reduce_ops_ = b; })
-      .def_property("enable_backward_optimizer_op_deps",
+      .def_property(
+          "enable_backward_optimizer_op_deps",
           [](const BuildStrategy &self) {
             return self.enable_backward_optimizer_op_deps_;
           },
@@ -4213,7 +4428,8 @@ All parameter, weight, gradient are variables in Paddle.
           [](BuildStrategy &self, bool fix_op_run_order) {
             self.fix_op_run_order_ = fix_op_run_order;
           })
-      .def_property("allow_cuda_graph_capture",
+      .def_property(
+          "allow_cuda_graph_capture",
           [](const BuildStrategy &self) {
             return self.allow_cuda_graph_capture_;
           },
@@ -4226,7 +4442,8 @@ All parameter, weight, gradient are variables in Paddle.
             new_bs.ClearFinalized();
             return new_bs;
           })
-      .def("_finalize_strategy_and_create_passes",
+      .def(
+          "_finalize_strategy_and_create_passes",
           [](BuildStrategy &self) -> std::shared_ptr<ir::PassBuilder> {
             return self.CreatePassesFromStrategy(true);
           },
@@ -4241,14 +4458,19 @@ All parameter, weight, gradient are variables in Paddle.
       });
 
-  pe.def(py::init<const std::vector<platform::Place> &,
-                  const std::vector<std::string> &, const std::string &,
-                  Scope *, std::vector<Scope *> &, const ExecutionStrategy &,
-                  const BuildStrategy &, ir::Graph *>())
+  pe.def(py::init<const std::vector<platform::Place> &,
+                  const std::vector<std::string> &,
+                  const std::string &,
+                  Scope *,
+                  std::vector<Scope *> &,
+                  const ExecutionStrategy &,
+                  const BuildStrategy &,
+                  ir::Graph *>())
      // NOTE: even we return a vec<Scope*>* to Python use reference policy.
      // We still cannot get local_scope from this vector, since the element
      // of vec<Scope*> will be freed by Python GC. We can only return Scope*
      // one by one and mark them as reference.
-      .def("local_scopes",
+      .def(
+          "local_scopes",
          [](ParallelExecutor &self) -> std::vector<Scope *> * {
            return &self.GetLocalScopes();
          },
@@ -4284,7 +4506,8 @@ All parameter, weight, gradient are variables in Paddle.
              std::unique_ptr<platform::ipu::IpuBackend, py::nodelete>>(
      m, "IpuBackend")
      // manage IpuBackend in C++
-      .def("get_instance",
+      .def(
+          "get_instance",
          []() {
            return std::unique_ptr<platform::ipu::IpuBackend, py::nodelete>(
                platform::ipu::IpuBackend::GetInstance());
@@ -4330,7 +4553,8 @@ All parameter, weight, gradient are variables in Paddle.
               PADDLE_THROW(platform::errors::Unimplemented(
                   "Failed to convert type: %s when set IpuStrategy "
                   "option: %s",
-                  option.get_type(), option_name));
+                  option.get_type(),
+                  option_name));
             }
             self.InsertStringOption(option_name, option_val);
           }
@@ -4338,7 +4562,8 @@ All parameter, weight, gradient are variables in Paddle.
           if (option_name.rfind("location_", 0) == 0) {
             for (auto option : element.second.cast<py::dict>()) {
               self.SetTensorLocation(
-                  option_name, option.first.cast<std::string>(),
+                  option_name,
+                  option.first.cast<std::string>(),
                   option.second.cast<std::uint64_t>());
             }
           } else if (option_name == "accumulate_outer_fragment") {
@@ -4386,17 +4611,19 @@ All parameter, weight, gradient are variables in Paddle.
                 PADDLE_THROW(platform::errors::Unimplemented(
                     "Failed to convert value type: %s when set "
                     "IpuStrategy option: %s",
-                    option.second.get_type(), option_key));
+                    option.second.get_type(),
+                    option_key));
               }
-              self.InsertStringPairOption(option_name, option_key, option_val);
+              self.InsertStringPairOption(
+                  option_name, option_key, option_val);
             }
           }
         } else {
           PADDLE_THROW(platform::errors::InvalidArgument(
               "Invalid IpuStrategy option value type: %s, please check "
               "input value for option: %s",
-              element.second.get_type(), option_name));
+              element.second.get_type(),
+              option_name));
         }
       }
     })