Commit 69e82d83 (unverified)
Authored June 29, 2022 by ronnywang; committed via GitHub on June 29, 2022
Parent: dc12605d

cherry pick 43890 (#43892)

* cherry pick 43890

Showing 1 changed file with 830 additions and 603 deletions (+830, -603)

paddle/fluid/pybind/pybind.cc
...
...
@@ -372,7 +372,8 @@ static T PyObjectCast(PyObject *obj) {
  } catch (py::cast_error &) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Python object is not type of %s, the real type is %s",
        typeid(T).name(), obj->ob_type->tp_name));
  }
}
...
...
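The hunk above wraps pybind11's cast in a try/catch so a failed conversion surfaces as a descriptive error (expected vs. actual Python type) instead of a bare cast_error. A minimal standalone sketch of that pattern, assuming only pybind11; the helper name and error text are illustrative, not Paddle's exact code:

#include <pybind11/pybind11.h>
#include <stdexcept>
#include <string>
#include <typeinfo>

namespace py = pybind11;

// Hypothetical helper: cast a raw PyObject* to T, reporting both the
// expected C++ type and the actual Python type on failure.
template <typename T>
static T PyObjectCastSketch(PyObject *obj) {
  try {
    return py::cast<T>(py::handle(obj));
  } catch (const py::cast_error &) {
    throw std::runtime_error(std::string("expected ") + typeid(T).name() +
                             ", got " + obj->ob_type->tp_name);
  }
}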
@@ -431,7 +432,8 @@ static std::vector<std::string> inline GetNameList(
}

static void inline CreateVariableIfNotExit(
    const py::handle &py_handle, const framework::Scope &scope,
    const framework::Executor *exe = nullptr) {
  std::vector<std::string> vec_res;
...
...
@@ -469,8 +471,9 @@ static void inline CreateVariableIfNotExit(
      PyObject *py_var_desc =
          PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kVarDescField);
      PADDLE_ENFORCE_NOT_NULL(
          py_var_desc,
          platform::errors::InvalidArgument(
              "The var_desc of parameter to set is None"));
      auto var_desc = PyObjectCast<framework::VarDesc>(py_var_desc);
      Py_DECREF(py_var_desc);
      var = const_cast<framework::Scope *>(&scope)->Var(para_name);
...
...
@@ -505,7 +508,8 @@ static void AssertStaticGraphAndDygraphGradMakerNoDiff() {
      }
    }
  }
  PADDLE_ENFORCE_EQ(
      ops.empty(), true,
      platform::errors::Unimplemented(
          "OperatorWithKernel [%s] have only static graph grad "
          "maker or have only dygraph grad maker, which is not "
...
...
@@ -527,8 +531,10 @@ static int GetNCCLVersion() {
#endif

template <typename PlaceType>
static void TensorCopyFrom(framework::Tensor *dst, const framework::Tensor &src,
                           const PlaceType &place, int64_t batch_size) {
  if (batch_size < 0) {
    framework::TensorCopy(src, place, dst);
  } else {
...
...
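TensorCopyFrom is a function template over the destination place type; later in this diff it is bound once per place with a defaulted batch_size argument. A small self-contained sketch of that binding pattern, using made-up CpuPlace/GpuPlace structs and a stub copy routine rather than Paddle's real types:

#include <pybind11/pybind11.h>
#include <cstdint>
#include <cstdio>

namespace py = pybind11;

struct CpuPlace {};
struct GpuPlace {};

// Illustrative stand-in for a place-templated copy routine.
template <typename PlaceType>
void CopyTo(const PlaceType & /*place*/, int64_t batch_size) {
  std::printf("copy, batch_size=%lld\n", static_cast<long long>(batch_size));
}

PYBIND11_MODULE(copy_sketch, m) {
  py::class_<CpuPlace>(m, "CpuPlace").def(py::init<>());
  py::class_<GpuPlace>(m, "GpuPlace").def(py::init<>());
  // One Python name, several template instantiations; pybind11 dispatches on
  // the runtime type of `place`, and batch_size defaults to -1.
  m.def("copy_to", &CopyTo<CpuPlace>, py::arg("place"), py::arg("batch_size") = -1);
  m.def("copy_to", &CopyTo<GpuPlace>, py::arg("place"), py::arg("batch_size") = -1);
}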
@@ -612,9 +618,10 @@ PYBIND11_MODULE(core_noavx, m) {
        PyCapsule_GetPointer(dltensor->ptr(), "dltensor"));
    PADDLE_ENFORCE_NOT_NULL(
        dmt, platform::errors::InvalidArgument(
                 "from_dlpack received an invalid capsule. "
                 "Note that a DLPack tensor can be consumed only once."));
    PyCapsule_SetName(dltensor->ptr(), "used_dltensor");
    DLTensor dl = dmt->dl_tensor;
...
...
@@ -632,7 +639,8 @@ PYBIND11_MODULE(core_noavx, m) {
  });
  m.def("_create_loaded_parameter",
        [](const py::handle &vec_var_list, const Scope &scope,
           const Executor *executor) {
          CreateVariableIfNotExit(vec_var_list, scope, executor);
        });
...
...
@@ -670,11 +678,12 @@ PYBIND11_MODULE(core_noavx, m) {
        << ", sci_mode=" << print_opt.sci_mode;
  });
  m.def("broadcast_shape",
        [](const std::vector<int64_t> &x_dim, const std::vector<int64_t> &y_dim) {
          return phi::vectorize(operators::details::BroadcastTwoDims(
              phi::make_ddim(x_dim), phi::make_ddim(y_dim), -1));
        });
  m.def("_append_python_callable_object_and_return_id",
...
...
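broadcast_shape defers to operators::details::BroadcastTwoDims, which is not reproduced in this diff. Assuming it follows the usual NumPy-style rule (align trailing dimensions; sizes must match or one of them be 1), a small standalone illustration of the computation is:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <vector>

// NumPy-style broadcast of two shapes (an assumption about what
// BroadcastTwoDims computes, not Paddle's actual implementation).
std::vector<int64_t> BroadcastShapes(std::vector<int64_t> x,
                                     std::vector<int64_t> y) {
  if (x.size() < y.size()) std::swap(x, y);
  std::vector<int64_t> out(x);
  size_t offset = x.size() - y.size();
  for (size_t i = 0; i < y.size(); ++i) {
    int64_t a = x[offset + i], b = y[i];
    if (a == b || b == 1) {
      out[offset + i] = a;
    } else if (a == 1) {
      out[offset + i] = b;
    } else {
      throw std::invalid_argument("shapes are not broadcastable");
    }
  }
  return out;
}

int main() {
  auto s = BroadcastShapes({8, 1, 6}, {7, 1});  // -> {8, 7, 6}
  for (auto d : s) std::printf("%lld ", static_cast<long long>(d));
  std::printf("\n");
}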
@@ -685,56 +694,56 @@ PYBIND11_MODULE(core_noavx, m) {
  m.def("_get_use_default_grad_op_desc_maker_ops", [] {
    return OpInfoMap::Instance().GetUseDefaultGradOpDescMakerOps();
  });
  m.def("_get_all_register_op_kernels",
        [](const std::string &lib) {
          std::unordered_map<std::string, std::vector<std::string>>
              all_kernels_info;
          if (lib == "fluid" || lib == "all") {
            auto &all_kernels =
                paddle::framework::OperatorWithKernel::AllOpKernels();
            for (auto &kernel_pair : all_kernels) {
              auto op_type = kernel_pair.first;
              std::vector<std::string> kernel_types;
              for (auto &info_pair : kernel_pair.second) {
                paddle::framework::OpKernelType kernel_type = info_pair.first;
                kernel_types.emplace_back(
                    paddle::framework::KernelTypeToString(kernel_type));
              }
              all_kernels_info.emplace(op_type, kernel_types);
            }
          }
          if (lib == "phi" || lib == "all") {
            auto phi_kernels = phi::KernelFactory::Instance().kernels();
            for (auto &kernel_pair : phi_kernels) {
              auto op_type = phi::TransToFluidOpName(kernel_pair.first);
              std::vector<std::string> kernel_types;
              for (auto &info_pair : kernel_pair.second) {
                framework::OpKernelType kernel_type =
                    framework::TransPhiKernelKeyToOpKernelType(info_pair.first);
                auto kernel_type_str = framework::KernelTypeToString(kernel_type);
                if (all_kernels_info.count(op_type)) {
                  if (std::find(all_kernels_info[op_type].begin(),
                                all_kernels_info[op_type].end(),
                                kernel_type_str) ==
                      all_kernels_info[op_type].end()) {
                    all_kernels_info[op_type].emplace_back(kernel_type_str);
                  }
                } else {
                  kernel_types.emplace_back(kernel_type_str);
                }
              }
              if (!kernel_types.empty()) {
                all_kernels_info.emplace(op_type, kernel_types);
              }
            }
          }
          return all_kernels_info;
        },
        py::arg("lib") = "all",
        R"DOC(
           Return the registered kernels in paddle.

           Args:
...
...
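The _get_all_register_op_kernels lambda above merges the fluid and phi kernel registries into one map, skipping kernel strings already recorded for an op. A simplified version of just that merge/dedup step, with plain strings standing in for the registry types:

#include <algorithm>
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

using KernelInfo = std::unordered_map<std::string, std::vector<std::string>>;

// Add kernel_type_str under op_type unless it is already listed.
void MergeKernel(KernelInfo *all, const std::string &op_type,
                 const std::string &kernel_type_str) {
  auto &types = (*all)[op_type];
  if (std::find(types.begin(), types.end(), kernel_type_str) == types.end()) {
    types.emplace_back(kernel_type_str);
  }
}

int main() {
  KernelInfo info;
  MergeKernel(&info, "matmul", "CPU/float32");
  MergeKernel(&info, "matmul", "GPU/float32");
  MergeKernel(&info, "matmul", "CPU/float32");  // duplicate, ignored
  std::printf("matmul has %zu kernels\n", info["matmul"].size());  // prints 2
}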
@@ -796,14 +805,22 @@ PYBIND11_MODULE(core_noavx, m) {
               self.EmplaceBackOutput(std::move(CastPyArg2Tensor(obj, 1)));
             }
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, bool attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, int attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, float attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, int64_t attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, const std::string &attr) {
             self.EmplaceBackAttr(attr);
...
...
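add_attr is registered many times in the hunks around here, once per C++ attribute type; pybind11 keeps all overloads under one Python name and picks the first whose signature the Python arguments convert to. A minimal sketch of that overload pattern on a toy context class (all names are illustrative):

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <cstdint>
#include <string>
#include <vector>

namespace py = pybind11;

struct AttrContext {
  std::vector<std::string> log;  // records which overload handled each call
  void AddBool(bool) { log.push_back("bool"); }
  void AddInt64(int64_t) { log.push_back("int64"); }
  void AddStrings(const std::vector<std::string> &) { log.push_back("vector<string>"); }
};

PYBIND11_MODULE(attr_sketch, m) {
  py::class_<AttrContext>(m, "AttrContext")
      .def(py::init<>())
      // Same Python name, different C++ parameter types.
      .def("add_attr", [](AttrContext &self, bool v) { self.AddBool(v); })
      .def("add_attr", [](AttrContext &self, int64_t v) { self.AddInt64(v); })
      .def("add_attr",
           [](AttrContext &self, const std::vector<std::string> &v) {
             self.AddStrings(v);
           })
      .def_readonly("log", &AttrContext::log);
}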
@@ -817,13 +834,14 @@ PYBIND11_MODULE(core_noavx, m) {
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self,
              const std::vector<int64_t> &attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self,
              const std::vector<std::string> &attr) {
             self.EmplaceBackAttr(attr);
           });

  py::class_<framework::Tensor> framework_tensor(
      m, "Tensor", py::buffer_protocol());
  g_framework_tensor_pytype =
      reinterpret_cast<PyTypeObject *>(framework_tensor.ptr());
  framework_tensor
...
...
@@ -898,70 +916,118 @@ PYBIND11_MODULE(core_noavx, m) {
             self.mutable_data<float>(place);
           })
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::CPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::XPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::CUDAPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::CUDAPinnedPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::MLUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_clear", &framework::Tensor::clear)
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::NPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_copy_from", &TensorCopyFrom<paddle::platform::CPUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::XPUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::CUDAPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::NPUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::CUDAPinnedPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::MLUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::Place>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("set", SetTensorFromPyArray<paddle::platform::CPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::XPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::CUDAPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::NPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::IPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::MLUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::CUDAPinnedPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false,
           R"DOC(
        Set the data of Tensor on place with given numpy array.
...
...
@@ -985,9 +1051,10 @@ PYBIND11_MODULE(core_noavx, m) {
            t.set(np.ndarray([5, 30]), fluid.CPUPlace())
          )DOC")
      .def("shape",
           [](framework::Tensor &self) { return vectorize(self.dims()); },
           R"DOC(
           Return the shape of Tensor.

           Returns:
...
...
@@ -1046,25 +1113,26 @@ PYBIND11_MODULE(core_noavx, m) {
             ostr << self;
             return ostr.str();
           })
      /* ------ End of original Tensor ------ */
      .def("__init__",
           [](framework::Tensor &instance,
              const std::vector<std::vector<size_t>> &recursive_sequence_lengths) {
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_offset_lod, -1), true,
                 platform::errors::InvalidArgument(
                     "The provided recursive_sequence_lengths info is "
                     "invalid, "
                     "the LoD converted by recursive_sequence_lengths is %s",
                     new_lod));
             new (&instance) framework::Tensor(new_offset_lod);
           })
      .def("__init__", [](framework::Tensor &instance) {
        new (&instance) framework::Tensor();
...
...
@@ -1075,20 +1143,23 @@ PYBIND11_MODULE(core_noavx, m) {
      // avoid misuse.
      // The discussion is here:
      // https://github.com/PaddlePaddle/Paddle/issues/10855
      .def("set_lod",
           [](framework::Tensor &self,
              const std::vector<std::vector<size_t>> &lod) {
             // the input lod is offset-based level-of-detail info
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_lod, vectorize(self.dims()).front()), true,
                 platform::errors::InvalidArgument(
                     "The provided LoD is invalid, the LoD is %s", new_lod));
             self.set_lod(new_lod);
           },
           py::arg("lod"),
           R"DOC(
           Set LoD of the Tensor.

           Args:
...
...
@@ -1108,28 +1179,32 @@ PYBIND11_MODULE(core_noavx, m) {
          t.set_lod([[0, 2, 5]])
          print(t.lod()) # [[0, 2, 5]]
        )DOC")
      .def("set_recursive_sequence_lengths",
           [](framework::Tensor &self,
              const std::vector<std::vector<size_t>>
                  &recursive_sequence_lengths) {
             // the input recursive_sequence_lengths is length-based
             // level-of-detail info
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_offset_lod, vectorize(self.dims()).front()), true,
                 platform::errors::InvalidArgument(
                     "The provided recursive_sequence_lengths info is "
                     "invalid, "
                     "the LoD converted by recursive_sequence_lengths is "
                     "%s",
                     new_lod));
             self.set_lod(new_offset_lod);
           },
           py::arg("recursive_sequence_lengths"),
           R"DOC(
        Set LoD of the Tensor according to recursive sequence lengths.

        For example, if recursive_sequence_lengths=[[2, 3]], which means
...
...
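set_recursive_sequence_lengths converts length-based LoD to the offset-based form via ConvertToOffsetBasedLoD before validating it; per the docstrings above, lengths [[2, 3]] become offsets [[0, 2, 5]]. A standalone sketch of that conversion (a plain prefix sum per level, which is my reading of the behavior shown in the docstrings, not Paddle's exact code):

#include <cstddef>
#include <cstdio>
#include <vector>

using LoD = std::vector<std::vector<size_t>>;

// Turn per-sequence lengths into cumulative offsets, level by level.
LoD LengthsToOffsets(const LoD &lengths) {
  LoD offsets;
  offsets.reserve(lengths.size());
  for (const auto &level : lengths) {
    std::vector<size_t> row{0};
    for (size_t len : level) row.push_back(row.back() + len);
    offsets.push_back(row);
  }
  return offsets;
}

int main() {
  LoD lod = LengthsToOffsets({{2, 3}});
  for (size_t v : lod[0]) std::printf("%zu ", v);  // prints: 0 2 5
  std::printf("\n");
}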
@@ -1154,16 +1229,17 @@ PYBIND11_MODULE(core_noavx, m) {
          print(t.recursive_sequence_lengths()) # [[2, 3]]
          print(t.lod()) # [[0, 2, 5]]
        )DOC")
      .def("lod",
           [](framework::Tensor &self) -> std::vector<std::vector<size_t>> {
             // output the offset-based lod info
             LoD lod = self.lod();
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           },
           R"DOC(
           Return the LoD of the Tensor.

           Returns:
...
...
@@ -1181,16 +1257,17 @@ PYBIND11_MODULE(core_noavx, m) {
          print(t.lod()) # [[0, 2, 5]]
        )DOC")
      // Set above comments of set_lod.
      .def("recursive_sequence_lengths",
           [](framework::Tensor &self) -> std::vector<std::vector<size_t>> {
             // output the length-based lod info
             LoD lod = phi::ConvertToLengthBasedLoD(self.lod());
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           },
           R"DOC(
           Return the recursive sequence lengths corresponding to the LoD
           of the Tensor.
...
...
@@ -1208,13 +1285,14 @@ PYBIND11_MODULE(core_noavx, m) {
          t.set_recursive_sequence_lengths([[2, 3]])
          print(t.recursive_sequence_lengths()) # [[2, 3]]
        )DOC")
      .def("has_valid_recursive_sequence_lengths",
           [](framework::Tensor &self) -> bool {
             // Check that the lod info is valid and match the outermost
             // dimension of the Tensor data
             return CheckLoD(self.lod(), vectorize(self.dims()).front());
           },
           R"DOC(
           Check whether the LoD of the Tensor is valid.

           Returns:
...
...
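has_valid_recursive_sequence_lengths calls CheckLoD with the tensor's outermost dimension. CheckLoD itself is not shown in this diff; assuming the usual invariants for offset-based LoD (each level starts at 0, offsets are non-decreasing, and the last level ends at the outer dimension), a validation sketch looks like:

#include <cstddef>
#include <vector>

using LoD = std::vector<std::vector<size_t>>;

// Assumed LoD invariants; illustrative only, not Paddle's CheckLoD.
bool IsValidOffsetLoD(const LoD &lod, size_t outer_dim) {
  for (const auto &level : lod) {
    if (level.empty() || level.front() != 0) return false;
    for (size_t i = 1; i < level.size(); ++i) {
      if (level[i] < level[i - 1]) return false;
    }
  }
  return lod.empty() || lod.back().back() == outer_dim;
}

int main() {
  return IsValidOffsetLoD({{0, 2, 5}}, 5) ? 0 : 1;  // valid: exit code 0
}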
@@ -1594,13 +1672,15 @@ PYBIND11_MODULE(core_noavx, m) {
        new (&instance) phi::SelectedRows();
      })
      .def("__init__",
           [](phi::SelectedRows &instance, const std::vector<int64_t> rows,
              const int64_t &height) {
             new (&instance) phi::SelectedRows(rows, height);
           })
      .def("get_tensor",
           [](phi::SelectedRows &self) { return self.mutable_value(); },
           py::return_value_policy::reference)
      .def("numel",
           [](phi::SelectedRows &self) -> int64_t {
             return self.value().numel();
...
...
@@ -1642,11 +1722,12 @@ All parameter, weight, gradient are variables in Paddle.
      })
      .def("get_float",
           [](const Variable &var) -> float { return var.Get<float>(); })
      .def("get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
           py::return_value_policy::reference)
      .def("get_bytes",
           [](Variable &self) {
             return py::bytes(*self.GetMutable<std::string>());
...
...
@@ -1655,55 +1736,66 @@ All parameter, weight, gradient are variables in Paddle.
           [](Variable &self, Strings str_list) {
             *self.GetMutable<Strings>() = str_list;
           })
      .def("set_vocab",
           [](Variable &self, Vocab vocab) {
             *self.GetMutable<Vocab>() = vocab;
           })
      .def("get_string_tensor",
           [](Variable &self) { return self.GetMutable<Strings>(); },
           py::return_value_policy::reference)
      .def("get_map_tensor",
           [](Variable &self) { return self.GetMutable<Vocab>(); },
           py::return_value_policy::reference)
      .def("get_lod_rank_table",
           [](Variable &self) { return self.GetMutable<LoDRankTable>(); },
           py::return_value_policy::reference)
      .def("get_selected_rows",
           [](Variable &self) -> phi::SelectedRows * {
             return self.GetMutable<phi::SelectedRows>();
           },
           py::return_value_policy::reference)
      .def("get_lod_tensor_array",
           [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
           py::return_value_policy::reference)
      .def("get_fetch_list",
           [](Variable &self) { return self.GetMutable<FetchList>(); },
           py::return_value_policy::reference)
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
      .def("get_communicator",
           [](Variable &self) -> platform::Communicator * {
             return self.GetMutable<platform::Communicator>();
           },
           py::return_value_policy::reference)
#endif
      .def("get_reader",
           [](Variable &self) -> framework::ReaderHolder * {
             PADDLE_ENFORCE_EQ(
                 self.IsType<framework::ReaderHolder>(), true,
                 platform::errors::InvalidArgument(
                     "The variable is not type of ReaderHolder."));
             return self.GetMutable<framework::ReaderHolder>();
           },
           py::return_value_policy::reference)
      .def("get_scope",
           [](Variable &self) -> Scope * {
             auto scope_vec = self.GetMutable<std::vector<framework::Scope *>>();
             PADDLE_ENFORCE_GT(
                 scope_vec->size(), 0,
                 platform::errors::InvalidArgument(
                     "The size of scope_vec should be greater than 0"));
             return scope_vec->front();
           },
           py::return_value_policy::reference)
      .def("set_scope",
           [](Variable &self, Scope &scope) {
             auto scope_vec = self.GetMutable<std::vector<framework::Scope *>>();
             scope_vec->emplace_back(&scope);
...
...
@@ -1736,12 +1828,13 @@ All parameter, weight, gradient are variables in Paddle.
  _Scope
      .def("_remove_from_pool",
           [](Scope &self) { ScopePool::Instance().Remove(&self); })
      .def("var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.Var(name);
           },
           py::arg("name"),
           R"DOC(
           Find or create variable named :code:`name` in the current scope.

           If the variable named :code:`name` does not exist in the
...
...
@@ -1754,8 +1847,10 @@ All parameter, weight, gradient are variables in Paddle.
           Returns:
               out (core.Variable): the found or created variable.
           )DOC",
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::arg("name"),
           R"DOC(
           Find variable named :code:`name` in the current scope or
           its parent scope. Return None if not found.
...
...
@@ -1768,7 +1863,9 @@ All parameter, weight, gradient are variables in Paddle.
           )DOC",
           py::return_value_policy::reference)
      .def("size", &Scope::Size)
      .def("erase", &Scope::EraseVars, py::arg("names"),
           R"DOC(
           Find variable named :code:`name` in the current scope or
           its parent scope. Return None if not found.
...
...
@@ -1780,33 +1877,37 @@ All parameter, weight, gradient are variables in Paddle.
            None
           )DOC",
           py::return_value_policy::reference)
      .def("new_scope",
           [](Scope &self) -> Scope * { return &self.NewScope(); },
           R"DOC(
           Create a new sub-scope of the current scope.

           Returns:
               out (core._Scope): the created sub-scope.
           )DOC",
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids,
           R"DOC(
           Delete all sub-scopes of the current scope.
           )DOC")
      .def("_kids", &Scope::kids);

  m.def("Scope",
        []() -> Scope * {
          auto *s = new Scope();
          ScopePool::Instance().Insert(std::unique_ptr<Scope>(s));
          return s;
        },
        R"DOC(
        Create a new scope.

        Returns:
            out (core._Scope): the created scope.
        )DOC",
        py::return_value_policy::reference);

  //! @note: Be careful! PyBind will return std::string as an unicode, not
  //! Python str. If you want a str object, you should cast them in Python.
...
...
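The module-level Scope() factory above inserts each new scope into a ScopePool singleton and hands Python only a borrowed reference (py::return_value_policy::reference); the earlier `_remove_from_pool` binding removes it again. A toy version of that ownership scheme, with simplified names and behavior assumed from the Insert/Remove calls visible here:

#include <memory>
#include <unordered_set>

class Scope {};

// Simplified pool: owns every Scope created for Python until it is
// explicitly removed, mirroring the Insert/Remove calls in the bindings.
class ScopePool {
 public:
  static ScopePool &Instance() {
    static ScopePool pool;
    return pool;
  }
  Scope *Insert(std::unique_ptr<Scope> s) {
    Scope *raw = s.get();
    owned_.insert(std::move(s));
    return raw;
  }
  void Remove(Scope *s) {
    for (auto it = owned_.begin(); it != owned_.end(); ++it) {
      if (it->get() == s) {
        owned_.erase(it);
        return;
      }
    }
  }

 private:
  std::unordered_set<std::unique_ptr<Scope>> owned_;
};

int main() {
  Scope *s = ScopePool::Instance().Insert(std::make_unique<Scope>());
  ScopePool::Instance().Remove(s);  // pool releases the object here
}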
@@ -1817,7 +1918,8 @@ All parameter, weight, gradient are variables in Paddle.
    if (info.HasOpProtoAndChecker()) {
      std::string str;
      PADDLE_ENFORCE_EQ(
          info.Proto().SerializeToString(&str), true,
          platform::errors::Fatal(
              "Serialize OpProto Error. This could be a bug of Paddle."));
      ret_values.emplace_back(str);
...
...
@@ -1838,22 +1940,24 @@ All parameter, weight, gradient are variables in Paddle.
    }
    return res;
  });
  m.def("get_grad_op_desc",
        [](const OpDesc &op_desc,
           const std::unordered_set<std::string> &no_grad_set,
           const std::vector<BlockDesc *> &grad_sub_block) {
          std::unordered_map<std::string, std::string> grad_to_var;
          std::vector<std::unique_ptr<OpDesc>> grad_op_descs =
              framework::OpInfoMap::Instance()
                  .Get(op_desc.Type())
                  .GradOpMaker()(op_desc, no_grad_set, &grad_to_var,
                                 grad_sub_block);
          std::vector<OpDesc *> grad_op_desc_ptrs(grad_op_descs.size());
          std::transform(
              grad_op_descs.begin(), grad_op_descs.end(),
              grad_op_desc_ptrs.begin(),
              [](std::unique_ptr<OpDesc> &p) { return p.release(); });
          return std::make_pair(grad_op_desc_ptrs, grad_to_var);
        });
  m.def("has_grad_op_maker", [](const std::string op_type) {
    return framework::OpInfoMap::Instance().Get(op_type).HasGradOpMaker();
  });
...
...
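get_grad_op_desc builds the grad ops as std::unique_ptr and then releases them into a vector of raw pointers with std::transform before handing them to Python. That release step in isolation, with an int standing in for OpDesc:

#include <algorithm>
#include <cstdio>
#include <memory>
#include <vector>

int main() {
  std::vector<std::unique_ptr<int>> owned;
  owned.push_back(std::make_unique<int>(1));
  owned.push_back(std::make_unique<int>(2));

  // Transfer ownership out of the unique_ptrs into raw pointers,
  // the same std::transform + release() pattern used above.
  std::vector<int *> raw(owned.size());
  std::transform(owned.begin(), owned.end(), raw.begin(),
                 [](std::unique_ptr<int> &p) { return p.release(); });

  std::printf("%d %d\n", *raw[0], *raw[1]);
  for (int *p : raw) delete p;  // the caller now owns them
}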
@@ -1866,7 +1970,8 @@ All parameter, weight, gradient are variables in Paddle.
    return framework::OpInfoMap::Instance().Get(op_type).HasInferInplace();
  });
  m.def("infer_no_need_buffer_slots",
        [](const std::string op_type, const framework::VariableNameMap &inputs,
           const framework::VariableNameMap &outputs,
           const framework::AttributeMap &attrs) {
          auto infer_func = framework::OpInfoMap::Instance()
...
...
@@ -1879,25 +1984,27 @@ All parameter, weight, gradient are variables in Paddle.
          return empty;
        }
      });
  m.def("prune",
        [](const ProgramDesc &origin,
           const std::set<std::string> &feeded_var_names,
           const std::vector<std::array<size_t, 2>> &targets) {
          ProgramDesc prog_with_targets(origin);
          for (const auto &t : targets) {
            prog_with_targets.MutableBlock(t[0])->Op(t[1])->SetIsTarget(true);
          }
          proto::ProgramDesc pruned_desc;
          auto pruned_origin_block_id_map =
              Prune(*prog_with_targets.Proto(), feeded_var_names, &pruned_desc);
          return std::make_tuple(ProgramDesc(pruned_desc),
                                 pruned_origin_block_id_map);
        });
  m.def("prune_backward",
        [](const framework::ProgramDesc &program) {
          return PruneBackward(program);
        },
        R"DOC(
             Prune the backward part of a program, mostly called in
             program.clone(for_test=True).
...
...
@@ -2040,12 +2147,12 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    device_types = phi::DeviceManager::GetAllDeviceTypes();
#else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
        "Cannot use get_all_device_type because you have installed"
        "CPU/GPU version PaddlePaddle.\n"
        "If you want to use get_all_device_type, please try to install"
        "CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return device_types;
  });
...
...
@@ -2054,12 +2161,12 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
#else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
        "Cannot use get_all_custom_device_type because you have installed"
        "CPU/GPU version PaddlePaddle.\n"
        "If you want to use get_all_custom_device_type, please try to "
        "install CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return device_types;
  });
...
...
@@ -2068,12 +2175,12 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    devices = phi::DeviceManager::GetAllDeviceList();
#else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
        "Cannot use get_available_device because you have installed"
        "CPU/GPU version PaddlePaddle.\n"
        "If you want to use get_available_device, please try to install"
        "CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return devices;
  });
...
...
@@ -2082,18 +2189,19 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    devices = phi::DeviceManager::GetAllCustomDeviceList();
#else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
        "Cannot use get_available_custom_device because you have "
        "installed"
        "CPU/GPU version PaddlePaddle.\n"
        "If you want to use get_available_custom_device, please try to "
        "install"
        "CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return devices;
  });
  py::class_<platform::CustomPlace>(m, "CustomPlace",
                                    R"DOC(
    CustomPlace is a descriptor of a device.
    It represents a custom device on which a tensor will be allocated and a model will run.
...
...
@@ -2105,7 +2213,8 @@ All parameter, weight, gradient are variables in Paddle.
        fake_cpu_place = paddle.CustomPlace("FakeCPU", 0)
        )DOC")
      .def("__init__",
           [](platform::CustomPlace &self, const std::string &device_type,
              int dev_id) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
             if (UNLIKELY(dev_id < 0)) {
...
...
@@ -2113,7 +2222,8 @@ All parameter, weight, gradient are variables in Paddle.
"Invalid CustomPlace(%s, %d), device id must be 0 "
"or "
"positive integer"
,
device_type
,
dev_id
);
device_type
,
dev_id
);
std
::
exit
(
-
1
);
}
...
...
@@ -2134,7 +2244,11 @@ All parameter, weight, gradient are variables in Paddle.
"inside "
"[0, %d), because %s "
"number on your machine is %d"
,
device_type
,
dev_id
,
dev_count
,
device_type
,
dev_count
);
device_type
,
dev_id
,
dev_count
,
device_type
,
dev_count
);
std
::
exit
(
-
1
);
}
}
...
...
@@ -2144,7 +2258,8 @@ All parameter, weight, gradient are variables in Paddle.
"Invalid CustomPlace(%s, %d), the device type is "
"not registered "
"as a custom device."
,
device_type
,
dev_id
);
device_type
,
dev_id
);
std
::
exit
(
-
1
);
}
#else
...
...
@@ -2153,7 +2268,7 @@ All parameter, weight, gradient are variables in Paddle.
"version PaddlePaddle.
\n
"
"If you want to use CustomDevice, please try to install"
"CustomDevice version "
"PaddlePaddle by: pip install paddlepaddle
-core
\n
"
"PaddlePaddle by: pip install paddlepaddle
\n
"
"If you only have CPU, please change "
"CustomPlace(%s, %d) to be CPUPlace().
\n
"
,
device_type
,
dev_id
);
...
...
@@ -2215,7 +2330,8 @@ All parameter, weight, gradient are variables in Paddle.
            LOG(ERROR) << string::Sprintf(
                "Invalid CUDAPlace(%d), must inside [0, %d), because GPU "
                "number on your machine is %d",
                dev_id, platform::GetGPUDeviceCount(),
                platform::GetGPUDeviceCount());
            std::exit(-1);
          }
...
...
@@ -2281,7 +2397,8 @@ All parameter, weight, gradient are variables in Paddle.
            LOG(ERROR) << string::Sprintf(
                "Invalid XPUPlace(%d), must inside [0, %d), because XPU "
                "number on your machine is %d",
                dev_id, platform::GetXPUDeviceCount(),
                platform::GetXPUDeviceCount());
            std::exit(-1);
          }
...
...
@@ -2446,7 +2563,8 @@ All parameter, weight, gradient are variables in Paddle.
            LOG(ERROR) << string::Sprintf(
                "Invalid NPUPlace(%d), must inside [0, %d), because NPU "
                "number on your machine is %d",
                dev_id, platform::GetNPUDeviceCount(),
                platform::GetNPUDeviceCount());
            std::exit(-1);
          }
...
...
@@ -2562,7 +2680,8 @@ All parameter, weight, gradient are variables in Paddle.
            LOG(ERROR) << string::Sprintf(
                "Invalid MLUPlace(%d), must inside [0, %d), because MLU "
                "number on your machine is %d",
                dev_id, platform::GetMLUDeviceCount(),
                platform::GetMLUDeviceCount());
            std::exit(-1);
          }
...
...
@@ -2635,8 +2754,10 @@ All parameter, weight, gradient are variables in Paddle.
      .def("mlu_device_id", [](platform::Place &self) { return self.device; })
      .def("custom_device_id",
           [](platform::Place &self) { return self.device; })
      .def("set_place",
           [](platform::Place &self, const platform::Place &other) {
             self = other;
           })
      .def("set_place",
           [](platform::Place &self, const platform::CPUPlace &cpu_place) {
             self = cpu_place;
...
...
@@ -2681,7 +2802,8 @@ All parameter, weight, gradient are variables in Paddle.
            true,
            platform::errors::InvalidArgument(
                "Cannot parse user input to OpDesc"));
        PADDLE_ENFORCE_EQ(
            desc.IsInitialized(), true,
            platform::errors::InvalidArgument(
                "The provided OpDesc is not "
                "initialized, the reason is: %s",
...
...
@@ -2689,37 +2811,43 @@ All parameter, weight, gradient are variables in Paddle.
        return OpRegistry::CreateOp(desc);
      })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::XPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::NPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPinnedPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::MLUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
...
...
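Every `run` overload above drops the GIL (pybind11::gil_scoped_release) for the duration of the C++ call, and elsewhere in this file the same effect is obtained with py::call_guard. A compact sketch of the two idioms, with a placeholder long_compute function:

#include <pybind11/pybind11.h>
#include <chrono>
#include <thread>

namespace py = pybind11;

// Placeholder for work that does not touch Python objects.
void long_compute() {
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
}

PYBIND11_MODULE(gil_sketch, m) {
  // Idiom 1: release the GIL inside the lambda body.
  m.def("run_inline", []() {
    py::gil_scoped_release release;
    long_compute();
  });
  // Idiom 2: let pybind11 release it around the whole call.
  m.def("run_guarded", &long_compute,
        py::call_guard<py::gil_scoped_release>());
}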
@@ -2729,8 +2857,8 @@ All parameter, weight, gradient are variables in Paddle.
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
             return op.Outputs();
           })
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
...
...
@@ -2745,11 +2873,12 @@ All parameter, weight, gradient are variables in Paddle.
  py::class_<framework::TrainerBase, std::shared_ptr<framework::TrainerBase>>(
      m, "TrainerBase")
      .def("get_worker_scope",
           [](TrainerBase &self, int thread_id) -> Scope * {
             return self.GetWorkerScope(thread_id);
           },
           py::return_value_policy::reference)
      .def("finalize", &TrainerBase::Finalize)
      .def("ResetDataset", &TrainerBase::ResetDataset);
...
...
@@ -2758,13 +2887,17 @@ All parameter, weight, gradient are variables in Paddle.
  py::class_<framework::Executor>(m, "Executor")
      .def(py::init<const platform::Place &>())
      .def("close", &Executor::Close)
      .def("run_from_dataset", &Executor::RunFromDataset,
           py::call_guard<py::gil_scoped_release>())
      .def("release_trainer", &Executor::ReleaseTrainer,
           py::call_guard<py::gil_scoped_release>())
      .def("init_for_dataset",
           [](Executor &self, const ProgramDesc &prog,
              const std::string &trainer_desc, Scope *scope,
              Dataset *dataset) -> std::shared_ptr<TrainerBase> {
             pybind11::gil_scoped_release release;
             return self.InitForDataset(prog, trainer_desc, scope, dataset);
...
...
@@ -2775,42 +2908,64 @@ All parameter, weight, gradient are variables in Paddle.
            self.RunFromDataset(trainer);
          })
      .def("run_prepared_ctx",
           [](Executor &self, ExecutorPrepareContext *ctx, Scope *scope,
              std::map<std::string, const LoDTensor *> *feed_targets,
              std::map<std::string, FetchType *> *fetch_targets,
              bool create_local_scope = true, bool create_vars = true,
              const std::string &feed_holder_name = "feed",
              const std::string &fetch_holder_name = "fetch") {
             pybind11::gil_scoped_release release;
             self.RunPreparedContext(ctx, scope, feed_targets, fetch_targets,
                                     create_local_scope, create_vars,
                                     feed_holder_name, fetch_holder_name);
           })
      .def("run_prepared_ctx",
           [](Executor &self, ExecutorPrepareContext *ctx, Scope *scope,
              bool create_local_scope = true, bool create_vars = true,
              bool keep_kids = false) {
             pybind11::gil_scoped_release release;
             self.RunPreparedContext(ctx, scope, create_local_scope,
                                     create_vars, keep_kids);
           })
      .def("prepare",
           [](Executor &self, const ProgramDesc &program, int block_id,
              const std::vector<std::string> &skip_ref_cnt_vars =
                  std::vector<std::string>(),
              bool force_disable_gc = false) {
             pybind11::gil_scoped_release release;
             return self.Prepare(program, block_id, skip_ref_cnt_vars,
                                 force_disable_gc);
           })
      .def("create_variables", &Executor::CreateVariables)
      .def("run",
           [](Executor &self, const ProgramDesc &prog, Scope *scope,
              int block_id, bool create_local_scope, bool create_vars,
              const std::vector<std::string> &fetch_vars) {
             pybind11::gil_scoped_release release;
             self.Run(prog, scope, block_id, create_local_scope, create_vars,
                      fetch_vars);
           });

  py::class_<framework::interpreter::CostInfo>(m, "CostInfo")
      .def(py::init<>())
...
...
@@ -2821,8 +2976,10 @@ All parameter, weight, gradient are variables in Paddle.
      });

  py::class_<framework::StandaloneExecutor>(m, "StandaloneExecutor")
      .def(py::init<const platform::Place &, const ProgramDesc &,
                    const ProgramDesc &, Scope *>())
      .def("run",
           [](StandaloneExecutor &self,
              const std::unordered_map<std::string, py::array> &input_dict,
...
...
@@ -2866,11 +3023,13 @@ All parameter, weight, gradient are variables in Paddle.
            return py::cast(std::move(ret));
          })
      .def("run",
           [](StandaloneExecutor &self, std::vector<std::string> feed_names,
              std::vector<std::string> fetch_names) {
             platform::RecordEvent record_event(
                 "StandaloneExecutor:run",
                 platform::TracerEventType::UserDefined, 1);
             paddle::framework::FetchList ret;
             {
               pybind11::gil_scoped_release release;
...
...
@@ -2951,21 +3110,30 @@ All parameter, weight, gradient are variables in Paddle.
  });
  m.def("memory_stat_get_current", memory::StatGetCurrentValue);
  m.def("memory_stat_get_peak", memory::StatGetPeakValue);
  m.def("run_cmd",
        [](const std::string &cmd, int time_out = -1,
           int sleep_inter = -1) -> const std::string {
          return paddle::framework::shell_get_command_output(cmd, time_out,
                                                              sleep_inter);
        },
        py::arg("cmd"), py::arg("time_out") = -1, py::arg("sleep_inter") = -1);
  m.def("shell_execute_cmd",
        [](const std::string &cmd, int time_out = 0, int sleep_inter = 0,
           bool redirect_stderr = false) -> std::vector<std::string> {
          return paddle::framework::shell_execute_cmd(cmd, time_out,
                                                      sleep_inter,
                                                      redirect_stderr);
        },
        py::arg("cmd"), py::arg("time_out") = 0, py::arg("sleep_inter") = 0,
        py::arg("redirect_stderr") = false);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  m.def("is_float16_supported",
        [](const platform::CUDAPlace &place) -> bool {
...
...
@@ -2979,13 +3147,16 @@ All parameter, weight, gradient are variables in Paddle.
#endif
  m.def("set_feed_variable",
        static_cast<void (*)(Scope *, const LoDTensor &, const std::string &,
                             size_t)>(&framework::SetFeedVariable));
  m.def("set_feed_variable",
        static_cast<void (*)(Scope *, const Strings &, const std::string &,
                             size_t)>(&framework::SetFeedVariable));
  m.def("get_fetch_variable",
        [](const Scope &scope, const std::string &var_name,
           size_t index) -> py::object {
          auto &var = framework::GetFetchVariable(scope, var_name, index);
          if (data_is_lod_tensor(var)) {
...
...
@@ -3033,26 +3204,30 @@ All parameter, weight, gradient are variables in Paddle.
  pylodtensorarray
      .def("__init__",
           [](LoDTensorArray &instance) { new (&instance) LoDTensorArray(); })
      .def("__getitem__",
           [](LoDTensorArray &self, size_t i) { return &self.at(i); },
           py::return_value_policy::reference)
      .def("__len__", [](LoDTensorArray &self) { return self.size(); })
      .def("__setitem__",
           [](LoDTensorArray &self, size_t i, const LoDTensor &t) {
             PADDLE_ENFORCE_LT(
                 i, self.size(),
                 platform::errors::InvalidArgument(
                     "The index to set is larger than the size "
                     "of LoDTensorArray."));
             self[i].ShareDataWith(t);
             self[i].set_lod(t.lod());
           })
      .def("append",
           [](LoDTensorArray &self, const LoDTensor &t) {
             self.emplace_back();
             self.back().ShareDataWith(t);
             self.back().set_lod(t.lod());
           },
           py::arg("tensor"),
           R"DOC(
             Append a LoDTensor to LoDTensorArray.

             Args:
...
...
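The __getitem__ binding above returns a pointer into the array with py::return_value_policy::reference, so Python gets a view into storage the C++ container still owns rather than a copy. A small self-contained sketch of that policy on a toy holder type (all names are illustrative):

#include <pybind11/pybind11.h>
#include <vector>

namespace py = pybind11;

struct Item {
  int value = 0;
};

struct Holder {
  std::vector<Item> items{{1}, {2}, {3}};
};

PYBIND11_MODULE(policy_sketch, m) {
  py::class_<Item>(m, "Item").def_readwrite("value", &Item::value);
  py::class_<Holder>(m, "Holder")
      .def(py::init<>())
      // Borrowed reference into the container: no copy, but the Holder
      // must outlive the returned Item on the Python side.
      .def("__getitem__",
           [](Holder &self, size_t i) { return &self.items.at(i); },
           py::return_value_policy::reference)
      .def("__len__", [](Holder &self) { return self.items.size(); });
}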
@@ -3072,89 +3247,94 @@ All parameter, weight, gradient are variables in Paddle.
t.set(np.ndarray([5, 30]), fluid.CPUPlace())
arr.append(t)
)DOC"
)
.
def
(
"_move_to_list"
,
[](
LoDTensorArray
&
self
)
->
py
::
list
{
py
::
list
res
(
self
.
size
());
for
(
size_t
i
=
0
;
i
<
self
.
size
();
++
i
)
{
res
[
i
]
=
py
::
cast
(
std
::
move
(
self
[
i
]));
}
self
.
clear
();
return
res
;
},
py
::
return_value_policy
::
take_ownership
);
.
def
(
"_move_to_list"
,
[](
LoDTensorArray
&
self
)
->
py
::
list
{
py
::
list
res
(
self
.
size
());
for
(
size_t
i
=
0
;
i
<
self
.
size
();
++
i
)
{
res
[
i
]
=
py
::
cast
(
std
::
move
(
self
[
i
]));
}
self
.
clear
();
return
res
;
},
py
::
return_value_policy
::
take_ownership
);
py
::
class_
<
FetchList
>
(
m
,
"FetchList"
,
R"DOC( FetchList is a
vector of boost::variant<LoDTensor, LoDTensorArray>.
)DOC"
)
      .def("_move_to_list",
           [](FetchList &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
               if (data_is_lod_tensor(self[i])) {
                 auto &data = BOOST_GET(LoDTensor, self[i]);
                 res[i] = py::cast(std::move(data));
               } else {
                 auto &data = BOOST_GET(LoDTensorArray, self[i]);
                 py::list tmp(data.size());
                 for (size_t j = 0; j < data.size(); ++j) {
                   tmp[j] = py::cast(std::move(data[j]));
                 }
                 res[i] = std::move(tmp);
               }
             }
             self.clear();
             return res;
           },
           py::return_value_policy::take_ownership)
      .def("_move_to_list",
           [](FetchList &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
               if (data_is_lod_tensor(self[i])) {
                 auto &data = BOOST_GET(LoDTensor, self[i]);
                 res[i] = py::cast(std::move(data));
               } else {
                 auto &data = BOOST_GET(LoDTensorArray, self[i]);
                 py::list tmp(data.size());
                 for (size_t j = 0; j < data.size(); ++j) {
                   tmp[j] = py::cast(std::move(data[j]));
                 }
                 res[i] = std::move(tmp);
               }
             }
             self.clear();
             return res;
           },
           py::return_value_policy::take_ownership)
      .def("append",
           [](FetchList &self, const LoDTensor &t) {
             self.emplace_back();
             auto &lod_tensor = BOOST_GET(LoDTensor, self.back());
             lod_tensor.ShareDataWith(t);
             lod_tensor.set_lod(t.lod());
           },
           py::arg("var"))
      .def("append",
           [](FetchList &self, const LoDTensorArray &t) {
             self.emplace_back();
             auto &lod_tensor_array = BOOST_GET(LoDTensorArray, self.back());
             for (size_t i = 0; i < t.size(); ++i) {
               lod_tensor_array[i].ShareDataWith(t[i]);
               lod_tensor_array[i].set_lod(t[i].lod());
             }
           },
           py::arg("var"));
      .def("append",
           [](FetchList &self, const LoDTensor &t) {
             self.emplace_back();
             auto &lod_tensor = BOOST_GET(LoDTensor, self.back());
             lod_tensor.ShareDataWith(t);
             lod_tensor.set_lod(t.lod());
           },
           py::arg("var"))
      .def("append",
           [](FetchList &self, const LoDTensorArray &t) {
             self.emplace_back();
             auto &lod_tensor_array = BOOST_GET(LoDTensorArray, self.back());
             for (size_t i = 0; i < t.size(); ++i) {
               lod_tensor_array[i].ShareDataWith(t[i]);
               lod_tensor_array[i].set_lod(t[i].lod());
             }
           },
           py::arg("var"));
  py::class_<FetchUnmergedList>(m, "FetchUnmergedList", R"DOC(
        FetchUnmergedList is a 2-D array of FetchType(boost::variant(LoDTensor, LoDTensorArray)).
        )DOC")
      .def("_move_to_list",
           [](FetchUnmergedList &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
               py::list tmp(self[i].size());
               for (size_t j = 0; j < self[i].size(); ++j) {
                 if (data_is_lod_tensor(self[i][j])) {
                   auto &var = BOOST_GET(LoDTensor, self[i][j]);
                   tmp[j] = py::cast(std::move(var));
                 } else {
                   auto &var = BOOST_GET(LoDTensorArray, self[i][j]);
                   py::list tmp_array(var.size());
                   for (size_t k = 0; k < var.size(); ++k) {
                     tmp_array[k] = std::move(var[k]);
                   }
                   tmp[j] = std::move(tmp_array);
                 }
               }
               res[i] = std::move(tmp);
               self[i].clear();
             }
             self.clear();
             return res;
           },
           py::return_value_policy::take_ownership);
      .def("_move_to_list",
           [](FetchUnmergedList &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
               py::list tmp(self[i].size());
               for (size_t j = 0; j < self[i].size(); ++j) {
                 if (data_is_lod_tensor(self[i][j])) {
                   auto &var = BOOST_GET(LoDTensor, self[i][j]);
                   tmp[j] = py::cast(std::move(var));
                 } else {
                   auto &var = BOOST_GET(LoDTensorArray, self[i][j]);
                   py::list tmp_array(var.size());
                   for (size_t k = 0; k < var.size(); ++k) {
                     tmp_array[k] = std::move(var[k]);
                   }
                   tmp[j] = std::move(tmp_array);
                 }
               }
               res[i] = std::move(tmp);
               self[i].clear();
             }
             self.clear();
             return res;
           },
           py::return_value_policy::take_ownership);
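FetchUnmergedList carries per-device fetch results that have not been merged across devices; _move_to_list converts them into a nested Python list. A hedged sketch of where this surfaces for users (editor's illustration; assumes the standard paddle.static Executor API and its return_merged flag):

# Editor's sketch: merged vs. unmerged fetch results (prog, feed, loss assumed defined).
import paddle
paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())
# merged   = exe.run(prog, feed=feed, fetch_list=[loss])                       # FetchList path
# unmerged = exe.run(prog, feed=feed, fetch_list=[loss], return_merged=False)  # FetchUnmergedList path,
#                                                                              # one entry per device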
  m.def("op_support_gpu", OpSupportGPU);
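op_support_gpu reports whether an operator type has a GPU kernel registered; a quick check from Python (editor's illustration, assuming the binding is exposed on paddle.fluid.core as named here):

# Editor's sketch: query GPU support for an operator type.
from paddle.fluid import core
print(core.op_support_gpu("relu"))  # True on a CUDA/HIP build with a GPU kernel, else False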
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
...
...
@@ -3168,11 +3348,12 @@ All parameter, weight, gradient are variables in Paddle.
    }
    platform::EmptyCache();
  });
  m.def("get_device_properties",
        [](int id) -> const gpuDeviceProp & {
          return platform::GetDeviceProperties(id);
        },
        py::return_value_policy::copy);
  m.def("get_device_properties",
        [](int id) -> const gpuDeviceProp & {
          return platform::GetDeviceProperties(id);
        },
        py::return_value_policy::copy);
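get_device_properties returns the cudaDeviceProp of a device selected by id; it is surfaced to users through paddle.device.cuda. A short sketch (editor's illustration of the public wrapper in this release):

# Editor's sketch: read device properties through the public wrapper.
import paddle
if paddle.is_compiled_with_cuda():
    props = paddle.device.cuda.get_device_properties(0)
    print(props.name, props.total_memory, props.multi_processor_count)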
  py::class_<gpuDeviceProp>(m, "_gpuDeviceProperties")
      .def_property_readonly(
...
...
@@ -3283,18 +3464,20 @@ All parameter, weight, gradient are variables in Paddle.
  m.def("reset_profiler", platform::ResetProfiler);
  m.def("register_pass", [](const std::string &pass_type, py::object callable) {
    PADDLE_ENFORCE_EQ(
        framework::ir::PassRegistry::Instance().Has(pass_type), false,
        framework::ir::PassRegistry::Instance().Has(pass_type),
        false,
        platform::errors::AlreadyExists("Pass '%s' is registered more than "
                                        "once. Please use another name.",
                                        pass_type));
    callable.inc_ref();
    framework::ir::PassRegistry::Instance().Insert(pass_type, [pass_type,
                                                               callable]() {
      py::gil_scoped_acquire guard;
      std::unique_ptr<framework::ir::Pass> pass(
          new framework::ir::GeneratePass(py::cast<std::string>(callable())));
      return pass;
    });
    framework::ir::PassRegistry::Instance().Insert(
        pass_type, [pass_type, callable]() {
          py::gil_scoped_acquire guard;
          std::unique_ptr<framework::ir::Pass> pass(
              new framework::ir::GeneratePass(
                  py::cast<std::string>(callable())));
          return pass;
        });
  });
  m.def("get_pass", [](const std::string &pass_type) {
    auto pass = framework::ir::PassRegistry::Instance().Get(pass_type);
...
...
@@ -3304,7 +3487,8 @@ All parameter, weight, gradient are variables in Paddle.
  m.def("size_of_dtype", framework::SizeOfType);
  py::class_<paddle::platform::ProfilerResult>(m, "_ProfilerResult")
      .def(py::init<>())
      .def("get_data", &paddle::platform::ProfilerResult::GetData,
      .def("get_data",
           &paddle::platform::ProfilerResult::GetData,
           py::return_value_policy::automatic_reference)
      .def("save", &paddle::platform::ProfilerResult::Save)
      .def("get_extra_info", &paddle::platform::ProfilerResult::GetExtraInfo);
...
...
@@ -3339,7 +3523,8 @@ All parameter, weight, gradient are variables in Paddle.
                    &paddle::platform::HostPythonNode::device_node_ptrs);
  py::class_<paddle::platform::Profiler>(m, "_Profiler")
      .def("create", &paddle::platform::Profiler::Create,
      .def("create",
           &paddle::platform::Profiler::Create,
           py::return_value_policy::take_ownership)
      .def("is_cupti_supported", &paddle::platform::Profiler::IsCuptiSupported)
      .def("is_cnpapi_supported",
...
...
@@ -3350,12 +3535,13 @@ All parameter, weight, gradient are variables in Paddle.
             profiler->Prepare();
           })
      .def("start", &paddle::platform::Profiler::Start)
      .def("stop",
           [](paddle::platform::Profiler *profiler) {
             platform::DisableHostEventRecorder();
             return profiler->Stop();
           },
           py::return_value_policy::automatic_reference);
      .def("stop",
           [](paddle::platform::Profiler *profiler) {
             platform::DisableHostEventRecorder();
             return profiler->Stop();
           },
           py::return_value_policy::automatic_reference);
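The _Profiler create/prepare/start/stop lifecycle bound here is driven by the Python paddle.profiler module; a minimal sketch of that flow (editor's illustration, assuming the paddle.profiler API of this release):

# Editor's sketch: the Python-level wrapper that ultimately calls
# _Profiler.create / start / stop as bound above.
import paddle
import paddle.profiler as profiler

prof = profiler.Profiler(targets=[profiler.ProfilerTarget.CPU])
prof.start()
for step in range(5):
    x = paddle.randn([16, 16])
    y = paddle.matmul(x, x)
    prof.step()
prof.stop()
prof.summary()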
  py::class_<paddle::platform::ProfilerOptions>(m, "ProfilerOptions")
      .def(py::init<>())
...
...
@@ -3412,22 +3598,29 @@ All parameter, weight, gradient are variables in Paddle.
           [](ir::Pass &self, const std::string &name,
              const std::string &attr) {
             self.Set<std::string>(name, new std::string(attr));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name, bool val) {
             self.Set<bool>(name, new bool(val));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name, int val) {
             self.Set<const int>(name, new int(val));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name,
           [](ir::Pass &self, const std::string &name, bool val) {
             self.Set<bool>(name, new bool(val));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name, int val) {
             self.Set<const int>(name, new int(val));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name,
              std::vector<std::string> set) {
             self.Set(name, new std::vector<std::string>(set));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name,
           [](ir::Pass &self, const std::string &name,
              std::unordered_set<std::string> set) {
             self.Set(name, new std::unordered_set<std::string>(set));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name,
           [](ir::Pass &self, const std::string &name,
              std::unordered_set<int> set) {
             self.Set(name, new std::unordered_set<int>(set));
           })
...
...
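The overloaded set bindings above let Python attach typed attributes to an ir::Pass before it is applied to a graph. A hedged sketch of that pattern (editor's illustration; the pass name and attribute name are illustrative only, and it assumes core.get_pass plus an apply binding defined elsewhere in this file, together with the IrGraph wrapper):

# Editor's sketch: fetch a registered pass, set a typed attribute, apply it to a graph.
from paddle.fluid import core
from paddle.fluid.framework import IrGraph

graph = IrGraph(core.Graph(main_program.desc), for_test=True)  # main_program assumed defined
my_pass = core.get_pass("fuse_relu_depthwise_conv_pass")       # pass name illustrative
my_pass.set("use_gpu", True)                                   # routed to Pass::Set<bool> above
my_pass.apply(graph.graph)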
@@ -3604,11 +3797,12 @@ All parameter, weight, gradient are variables in Paddle.
          },
          R"DOC(This config indicates whether this is distributed training with a parameter server.
          )DOC")
      .def_property(
          "_dry_run",
          [](const ExecutionStrategy &self) { return self.dry_run_; },
          [](ExecutionStrategy &self, bool dry_run) {
            self.dry_run_ = dry_run;
          });
      .def_property(
          "_dry_run",
          [](const ExecutionStrategy &self) { return self.dry_run_; },
          [](ExecutionStrategy &self, bool dry_run) {
            self.dry_run_ = dry_run;
          });
  exec_strategy.def_property(
      "use_experimental_executor",
...
...
@@ -3671,7 +3865,8 @@ All parameter, weight, gradient are variables in Paddle.
          "reduce_strategy",
          [](const BuildStrategy &self) { return self.reduce_; },
          [](BuildStrategy &self, BuildStrategy::ReduceStrategy strategy) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
@@ -3701,7 +3896,8 @@ All parameter, weight, gradient are variables in Paddle.
          [](const BuildStrategy &self) { return self.gradient_scale_; },
          [](BuildStrategy &self,
             BuildStrategy::GradientScaleStrategy strategy) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
@@ -3766,7 +3962,8 @@ All parameter, weight, gradient are variables in Paddle.
          "debug_graphviz_path",
          [](const BuildStrategy &self) { return self.debug_graphviz_path_; },
          [](BuildStrategy &self, const std::string &path) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
@@ -3793,7 +3990,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.enable_sequential_execution_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
@@ -3819,7 +4017,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.remove_unnecessary_lock_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
@@ -3856,11 +4055,12 @@ All parameter, weight, gradient are variables in Paddle.
             const std::vector<std::string> &trainers_endpoints) {
            self.trainers_endpoints_ = trainers_endpoints;
          })
      .def_property(
          "trainer_id",
          [](const BuildStrategy &self) { return self.trainer_id_; },
          [](BuildStrategy &self, int trainer_id) {
            self.trainer_id_ = trainer_id;
          })
      .def_property(
          "trainer_id",
          [](const BuildStrategy &self) { return self.trainer_id_; },
          [](BuildStrategy &self, int trainer_id) {
            self.trainer_id_ = trainer_id;
          })
      .def_property(
          "nccl_comm_num",
          [](const BuildStrategy &self) { return self.nccl_comm_num_; },
...
...
@@ -3873,20 +4073,22 @@ All parameter, weight, gradient are variables in Paddle.
          [](BuildStrategy &self, int bkcl_comm_num) {
            self.bkcl_comm_num_ = bkcl_comm_num;
          })
      .def_property(
          "use_hierarchical_allreduce",
          [](const BuildStrategy &self) {
            return self.use_hierarchical_allreduce_;
          },
          [](BuildStrategy &self, bool use) {
            self.use_hierarchical_allreduce_ = use;
          })
      .def_property(
          "hierarchical_allreduce_inter_nranks",
          [](const BuildStrategy &self) {
            return self.hierarchical_allreduce_inter_nranks_;
          },
          [](BuildStrategy &self, int nranks) {
            self.hierarchical_allreduce_inter_nranks_ = nranks;
          })
      .def_property(
          "use_hierarchical_allreduce",
          [](const BuildStrategy &self) {
            return self.use_hierarchical_allreduce_;
          },
          [](BuildStrategy &self, bool use) {
            self.use_hierarchical_allreduce_ = use;
          })
      .def_property(
          "hierarchical_allreduce_inter_nranks",
          [](const BuildStrategy &self) {
            return self.hierarchical_allreduce_inter_nranks_;
          },
          [](BuildStrategy &self, int nranks) {
            self.hierarchical_allreduce_inter_nranks_ = nranks;
          })
      .def_property(
          "fuse_elewise_add_act_ops",
...
...
@@ -3894,7 +4096,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.fuse_elewise_add_act_ops_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
@@ -3919,7 +4122,8 @@ All parameter, weight, gradient are variables in Paddle.
          "fuse_gemm_epilogue",
          [](const BuildStrategy &self) { return self.fuse_gemm_epilogue_; },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
@@ -3944,7 +4148,8 @@ All parameter, weight, gradient are variables in Paddle.
          "fuse_bn_act_ops",
          [](const BuildStrategy &self) { return self.fuse_bn_act_ops_; },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
@@ -3969,7 +4174,8 @@ All parameter, weight, gradient are variables in Paddle.
          "fuse_bn_add_act_ops",
          [](const BuildStrategy &self) { return self.fuse_bn_add_act_ops_; },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
@@ -3994,7 +4200,8 @@ All parameter, weight, gradient are variables in Paddle.
          "enable_auto_fusion",
          [](const BuildStrategy &self) { return self.enable_auto_fusion_; },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
@@ -4022,7 +4229,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.fuse_relu_depthwise_conv_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
@@ -4045,19 +4253,21 @@ All parameter, weight, gradient are variables in Paddle.
build_strategy = static.BuildStrategy()
build_strategy.fuse_relu_depthwise_conv = True
)DOC"
          )
      .def_property(
          "fuse_broadcast_ops",
          [](const BuildStrategy &self) {
            return self.fuse_broadcast_ops_ == true ||
                   self.fuse_broadcast_ops_ == paddle::none;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, "
                                  "cannot be configured again."));
            self.fuse_broadcast_ops_ = b;
          },
          R"DOC((bool, optional): fuse_broadcast_op indicates whether
      .def_property(
          "fuse_broadcast_ops",
          [](const BuildStrategy &self) {
            return self.fuse_broadcast_ops_ == true ||
                   self.fuse_broadcast_ops_ == paddle::none;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, "
                                  "cannot be configured again."));
            self.fuse_broadcast_ops_ = b;
          },
          R"DOC((bool, optional): fuse_broadcast_op indicates whether
to fuse the broadcast ops. Note that, in Reduce mode,
fusing broadcast ops may make the program faster. Because
fusing broadcast OP equals delaying the execution of all
...
...
@@ -4075,23 +4285,26 @@ All parameter, weight, gradient are variables in Paddle.
build_strategy = static.BuildStrategy()
build_strategy.fuse_broadcast_ops = True
)DOC"
          )
      .def_property(
          "fuse_all_optimizer_ops",
          [](const BuildStrategy &self) {
            return self.fuse_all_optimizer_ops_ == true ||
                   self.fuse_all_optimizer_ops_ == paddle::none;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, "
                                  "cannot be configured again."));
            self.fuse_all_optimizer_ops_ = b;
          })
      .def_property(
          "fuse_all_optimizer_ops",
          [](const BuildStrategy &self) {
            return self.fuse_all_optimizer_ops_ == true ||
                   self.fuse_all_optimizer_ops_ == paddle::none;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, "
                                  "cannot be configured again."));
            self.fuse_all_optimizer_ops_ = b;
          })
      .def_property(
          "sync_batch_norm",
          [](const BuildStrategy &self) { return self.sync_batch_norm_; },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
...
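All of the properties in this block configure a single BuildStrategy object that is frozen (finalized) once the program is compiled; after that point the setters above raise PreconditionNotMet. A short sketch of the intended usage, in the style of the DOC examples above (editor's illustration; the compile step assumes the usual CompiledProgram API with a loss variable already defined):

# Editor's sketch: set BuildStrategy knobs before compiling.
import paddle
import paddle.static as static
paddle.enable_static()

build_strategy = static.BuildStrategy()
build_strategy.fuse_elewise_add_act_ops = True
build_strategy.fuse_broadcast_ops = True
build_strategy.sync_batch_norm = False

# compiled = static.CompiledProgram(static.default_main_program()).with_data_parallel(
#     loss_name=loss.name, build_strategy=build_strategy)   # loss assumed defined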
@@ -4169,9 +4382,10 @@ All parameter, weight, gradient are variables in Paddle.
            self.is_distribution_ = b;
#endif
          })
      .def_property(
          "async_mode",
          [](const BuildStrategy &self) { return self.async_mode_; },
          [](BuildStrategy &self, bool b) { self.async_mode_ = b; })
      .def_property(
          "async_mode",
          [](const BuildStrategy &self) { return self.async_mode_; },
          [](BuildStrategy &self, bool b) { self.async_mode_ = b; })
      .def_property(
          "enable_inplace",
          [](const BuildStrategy &self) { return self.enable_inplace_; },
...
...
@@ -4187,13 +4401,14 @@ All parameter, weight, gradient are variables in Paddle.
                   self.fuse_all_reduce_ops_ == paddle::none;
          },
          [](BuildStrategy &self, bool b) { self.fuse_all_reduce_ops_ = b; })
      .def_property(
          "enable_backward_optimizer_op_deps",
          [](const BuildStrategy &self) {
            return self.enable_backward_optimizer_op_deps_;
          },
          [](BuildStrategy &self, bool b) {
            self.enable_backward_optimizer_op_deps_ = b;
          })
      .def_property(
          "enable_backward_optimizer_op_deps",
          [](const BuildStrategy &self) {
            return self.enable_backward_optimizer_op_deps_;
          },
          [](BuildStrategy &self, bool b) {
            self.enable_backward_optimizer_op_deps_ = b;
          })
      .def_property(
          "cache_runtime_context",
          [](const BuildStrategy &self) { return self.cache_runtime_context_; },
...
...
@@ -4213,24 +4428,26 @@ All parameter, weight, gradient are variables in Paddle.
          [](BuildStrategy &self, bool fix_op_run_order) {
            self.fix_op_run_order_ = fix_op_run_order;
          })
      .def_property(
          "allow_cuda_graph_capture",
          [](const BuildStrategy &self) {
            return self.allow_cuda_graph_capture_;
          },
          [](BuildStrategy &self, bool allow_cuda_graph_capture) {
            self.allow_cuda_graph_capture_ = allow_cuda_graph_capture;
          })
      .def_property(
          "allow_cuda_graph_capture",
          [](const BuildStrategy &self) {
            return self.allow_cuda_graph_capture_;
          },
          [](BuildStrategy &self, bool allow_cuda_graph_capture) {
            self.allow_cuda_graph_capture_ = allow_cuda_graph_capture;
          })
      .def("_copy",
           [](const BuildStrategy &self) {
             auto new_bs = self;
             new_bs.ClearFinalized();
             return new_bs;
           })
      .def("_finalize_strategy_and_create_passes",
           [](BuildStrategy &self) -> std::shared_ptr<ir::PassBuilder> {
             return self.CreatePassesFromStrategy(true);
           },
           R"DOC(Allow users to customize passes. Normally model-specific
      .def("_finalize_strategy_and_create_passes",
           [](BuildStrategy &self) -> std::shared_ptr<ir::PassBuilder> {
             return self.CreatePassesFromStrategy(true);
           },
           R"DOC(Allow users to customize passes. Normally model-specific
optimization passes should be defined in this way. BuildStrategy
cannot be updated after being finalized.)DOC");
...
...
@@ -4241,18 +4458,23 @@ All parameter, weight, gradient are variables in Paddle.
  });
  pe.def(py::init<const std::vector<platform::Place> &,
                  const std::vector<std::string> &, const std::string &,
                  Scope *, std::vector<Scope *> &, const ExecutionStrategy &,
                  const BuildStrategy &, ir::Graph *>())
                  const std::vector<std::string> &,
                  const std::string &,
                  Scope *,
                  std::vector<Scope *> &,
                  const ExecutionStrategy &,
                  const BuildStrategy &,
                  ir::Graph *>())
      // NOTE: even though we return a vec<Scope*>* to Python with reference policy,
// We still cannot get local_scope from this vector, since the element
// of vec<Scope*> will be freed by Python GC. We can only return Scope*
// one by one and mark them as reference.
      .def("local_scopes",
           [](ParallelExecutor &self) -> std::vector<Scope *> * {
             return &self.GetLocalScopes();
           },
           py::return_value_policy::reference)
      .def("local_scopes",
           [](ParallelExecutor &self) -> std::vector<Scope *> * {
             return &self.GetLocalScopes();
           },
           py::return_value_policy::reference)
      .def("drop_local_exe_scopes", &ParallelExecutor::DropLocalExeScopes)
      .def("_need_create_local_exe_scopes",
           &ParallelExecutor::NeedCreateLocalExeScope)
...
...
@@ -4284,12 +4506,13 @@ All parameter, weight, gradient are variables in Paddle.
                 std::unique_ptr<platform::ipu::IpuBackend, py::nodelete>>(
      m, "IpuBackend")
      // manage IpuBackend in C++
      .def("get_instance",
           []() {
             return std::unique_ptr<platform::ipu::IpuBackend, py::nodelete>(
                 platform::ipu::IpuBackend::GetInstance());
           },
           py::return_value_policy::reference)
      .def("get_instance",
           []() {
             return std::unique_ptr<platform::ipu::IpuBackend, py::nodelete>(
                 platform::ipu::IpuBackend::GetInstance());
           },
           py::return_value_policy::reference)
      .def("weights_to_host", &platform::ipu::IpuBackend::WeightsToHost)
      .def("detach", &platform::ipu::IpuBackend::Detach)
      .def("reset", &platform::ipu::IpuBackend::Reset)
...
...
@@ -4330,7 +4553,8 @@ All parameter, weight, gradient are variables in Paddle.
            PADDLE_THROW(platform::errors::Unimplemented(
                "Failed to convert type: %s when set IpuStrategy "
                "option: %s",
                option.get_type(), option_name));
                option.get_type(),
                option_name));
          }
          self.InsertStringOption(option_name, option_val);
        }
...
...
@@ -4338,7 +4562,8 @@ All parameter, weight, gradient are variables in Paddle.
        if (option_name.rfind("location_", 0) == 0) {
          for (auto option : element.second.cast<py::dict>()) {
            self.SetTensorLocation(
                option_name, option.first.cast<std::string>(),
                option_name,
                option.first.cast<std::string>(),
                option.second.cast<std::uint64_t>());
          }
        } else if (option_name == "accumulate_outer_fragment") {
...
...
@@ -4386,17 +4611,19 @@ All parameter, weight, gradient are variables in Paddle.
                PADDLE_THROW(platform::errors::Unimplemented(
                    "Failed to convert value type: %s when set "
                    "IpuStrategy option: %s",
                    option.second.get_type(), option_key));
                    option.second.get_type(),
                    option_key));
              }
              self.InsertStringPairOption(option_name, option_key, option_val);
              self.InsertStringPairOption(
                  option_name, option_key, option_val);
            }
          }
        } else {
          PADDLE_THROW(platform::errors::InvalidArgument(
              "Invalid IpuStrategy option value type: %s, please check "
              "input value for option: %s",
              element.second.get_type(), option_name));
              element.second.get_type(),
              option_name));
        }
      }
    })
...
...
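The option-parsing code above backs IpuStrategy option setting on the Python side, dispatching each entry by its value type (plain strings, per-tensor location dicts, nested dicts of string pairs, and so on). A hedged sketch of the high-level flow (editor's illustration; it assumes an IPU-enabled build and the paddle.static.IpuStrategy API of this release, and the option names used are illustrative only):

# Editor's sketch: high-level IpuStrategy configuration that funnels into
# the per-type option handling shown in this diff.
import paddle
import paddle.static as static
paddle.enable_static()

ipu_strategy = static.IpuStrategy()
ipu_strategy.set_graph_config(num_ipus=1, is_training=False)
ipu_strategy.set_options({"enable_fp16": False})   # option names illustrative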