Unverified commit 69e82d83
Authored Jun 29, 2022 by ronnywang; committed via GitHub on Jun 29, 2022.

cherry pick 43890 (#43892)

* cherry pick 43890

Parent commit: dc12605d
Showing 1 changed file with 830 additions and 603 deletions (+830 −603)

paddle/fluid/pybind/pybind.cc  +830 −603
@@ -372,7 +372,8 @@ static T PyObjectCast(PyObject *obj) {
  } catch (py::cast_error &) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Python object is not type of %s, the real type is %s",
        typeid(T).name(),
        obj->ob_type->tp_name));
  }
}
@@ -431,7 +432,8 @@ static std::vector<std::string> inline GetNameList(
}

static void inline CreateVariableIfNotExit(
    const py::handle &py_handle,
    const framework::Scope &scope,
    const framework::Executor *exe = nullptr) {
  std::vector<std::string> vec_res;
@@ -469,8 +471,9 @@ static void inline CreateVariableIfNotExit(
      PyObject *py_var_desc =
          PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kVarDescField);
      PADDLE_ENFORCE_NOT_NULL(
          py_var_desc,
          platform::errors::InvalidArgument(
              "The var_desc of parameter to set is None"));
      auto var_desc = PyObjectCast<framework::VarDesc>(py_var_desc);
      Py_DECREF(py_var_desc);
      var = const_cast<framework::Scope *>(&scope)->Var(para_name);
@@ -505,7 +508,8 @@ static void AssertStaticGraphAndDygraphGradMakerNoDiff() {
      }
    }
  }
  PADDLE_ENFORCE_EQ(ops.empty(),
                    true,
                    platform::errors::Unimplemented(
                        "OperatorWithKernel [%s] have only static graph grad "
                        "maker or have only dygraph grad maker, which is not "
@@ -527,8 +531,10 @@ static int GetNCCLVersion() {
#endif

template <typename PlaceType>
static void TensorCopyFrom(framework::Tensor *dst,
                           const framework::Tensor &src,
                           const PlaceType &place,
                           int64_t batch_size) {
  if (batch_size < 0) {
    framework::TensorCopy(src, place, dst);
  } else {
@@ -612,9 +618,10 @@ PYBIND11_MODULE(core_noavx, m) {
        PyCapsule_GetPointer(dltensor->ptr(), "dltensor"));
    PADDLE_ENFORCE_NOT_NULL(
        dmt,
        platform::errors::InvalidArgument(
            "from_dlpack received an invalid capsule. "
            "Note that a DLPack tensor can be consumed only once."));
    PyCapsule_SetName(dltensor->ptr(), "used_dltensor");
    DLTensor dl = dmt->dl_tensor;
@@ -632,7 +639,8 @@ PYBIND11_MODULE(core_noavx, m) {
  });

  m.def("_create_loaded_parameter",
        [](const py::handle &vec_var_list,
           const Scope &scope,
           const Executor *executor) {
          CreateVariableIfNotExit(vec_var_list, scope, executor);
        });
@@ -670,11 +678,12 @@ PYBIND11_MODULE(core_noavx, m) {
       << ", sci_mode=" << print_opt.sci_mode;
  });

  m.def("broadcast_shape",
        [](const std::vector<int64_t> &x_dim,
           const std::vector<int64_t> &y_dim) {
          return phi::vectorize(operators::details::BroadcastTwoDims(
              phi::make_ddim(x_dim), phi::make_ddim(y_dim), -1));
        });

  m.def(
      "_append_python_callable_object_and_return_id",
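For reference, a minimal Python sketch of how the broadcast_shape binding above can be exercised; it assumes the compiled extension module is importable as paddle.fluid.core and that the usual NumPy-style broadcasting rules apply:

    # Hypothetical usage sketch of the broadcast_shape binding defined above.
    # Assumes the extension module is importable as paddle.fluid.core.
    from paddle.fluid import core

    # Broadcasting [2, 1, 3] against [4, 3]: trailing dims are aligned and
    # size-1 dims are stretched, as in NumPy.
    out = core.broadcast_shape([2, 1, 3], [4, 3])
    print(out)  # expected: [2, 4, 3]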
@@ -685,56 +694,56 @@ PYBIND11_MODULE(core_noavx, m) {
  m.def("_get_use_default_grad_op_desc_maker_ops",
        [] { return OpInfoMap::Instance().GetUseDefaultGradOpDescMakerOps(); });

  m.def(
      "_get_all_register_op_kernels",
      [](const std::string &lib) {
        std::unordered_map<std::string, std::vector<std::string>>
            all_kernels_info;
        if (lib == "fluid" || lib == "all") {
          auto &all_kernels =
              paddle::framework::OperatorWithKernel::AllOpKernels();
          for (auto &kernel_pair : all_kernels) {
            auto op_type = kernel_pair.first;
            std::vector<std::string> kernel_types;
            for (auto &info_pair : kernel_pair.second) {
              paddle::framework::OpKernelType kernel_type = info_pair.first;
              kernel_types.emplace_back(
                  paddle::framework::KernelTypeToString(kernel_type));
            }
            all_kernels_info.emplace(op_type, kernel_types);
          }
        }
        if (lib == "phi" || lib == "all") {
          auto phi_kernels = phi::KernelFactory::Instance().kernels();
          for (auto &kernel_pair : phi_kernels) {
            auto op_type = phi::TransToFluidOpName(kernel_pair.first);
            std::vector<std::string> kernel_types;
            for (auto &info_pair : kernel_pair.second) {
              framework::OpKernelType kernel_type =
                  framework::TransPhiKernelKeyToOpKernelType(info_pair.first);
              auto kernel_type_str = framework::KernelTypeToString(kernel_type);
              if (all_kernels_info.count(op_type)) {
                if (std::find(all_kernels_info[op_type].begin(),
                              all_kernels_info[op_type].end(),
                              kernel_type_str) ==
                    all_kernels_info[op_type].end()) {
                  all_kernels_info[op_type].emplace_back(kernel_type_str);
                }
              } else {
                kernel_types.emplace_back(kernel_type_str);
              }
            }
            if (!kernel_types.empty()) {
              all_kernels_info.emplace(op_type, kernel_types);
            }
          }
        }
        return all_kernels_info;
      },
      py::arg("lib") = "all",
      R"DOC(
           Return the registered kernels in paddle.

           Args:
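A small usage sketch of the kernel-query binding above, assuming it is exposed on the compiled core module; the exact set of kernels returned depends on the build and on whether "fluid", "phi" or "all" is requested:

    # Hypothetical sketch: list registered kernels per operator.
    # Assumes paddle.fluid.core exposes _get_all_register_op_kernels as bound above.
    from paddle.fluid import core

    kernels = core._get_all_register_op_kernels("all")  # "fluid", "phi" or "all"
    # kernels maps op type -> list of kernel type strings describing
    # data type, layout and place for each registered kernel.
    for op_type in list(kernels)[:5]:
        print(op_type, len(kernels[op_type]))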
@@ -796,14 +805,22 @@ PYBIND11_MODULE(core_noavx, m) {
              self.EmplaceBackOutput(std::move(CastPyArg2Tensor(obj, 1)));
            }
          })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, bool attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, int attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, float attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, int64_t attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, const std::string &attr) {
             self.EmplaceBackAttr(attr);
@@ -817,13 +834,14 @@ PYBIND11_MODULE(core_noavx, m) {
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self,
              const std::vector<int64_t> &attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self,
              const std::vector<std::string> &attr) {
             self.EmplaceBackAttr(attr);
           });

  py::class_<framework::Tensor> framework_tensor(
      m, "Tensor", py::buffer_protocol());
  g_framework_tensor_pytype =
      reinterpret_cast<PyTypeObject *>(framework_tensor.ptr());
  framework_tensor
@@ -898,70 +916,118 @@ PYBIND11_MODULE(core_noavx, m) {
            self.mutable_data<float>(place);
          })
      .def("_mutable_data",
           [](framework::Tensor &self,
              paddle::platform::CPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self,
              paddle::platform::XPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self,
              paddle::platform::CUDAPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self,
              paddle::platform::CUDAPinnedPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self,
              paddle::platform::MLUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_clear", &framework::Tensor::clear)
      .def("_mutable_data",
           [](framework::Tensor &self,
              paddle::platform::NPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_copy_from", &TensorCopyFrom<paddle::platform::CPUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::XPUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::CUDAPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::NPUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::CUDAPinnedPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::MLUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::Place>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("set", SetTensorFromPyArray<paddle::platform::CPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::XPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::CUDAPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::NPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::IPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::MLUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::CUDAPinnedPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false,
           R"DOC(
        Set the data of Tensor on place with given numpy array.
@@ -985,9 +1051,10 @@ PYBIND11_MODULE(core_noavx, m) {
                t.set(np.ndarray([5, 30]), fluid.CPUPlace())
          )DOC")
      .def("shape",
           [](framework::Tensor &self) { return vectorize(self.dims()); },
           R"DOC(
           Return the shape of Tensor.

           Returns:
@@ -1046,25 +1113,26 @@ PYBIND11_MODULE(core_noavx, m) {
             ostr << self;
             return ostr.str();
           }) /* ------ End of original Tensor ------ */
      .def("__init__",
           [](framework::Tensor &instance,
              const std::vector<std::vector<size_t>>
                  &recursive_sequence_lengths) {
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_offset_lod, -1),
                 true,
                 platform::errors::InvalidArgument(
                     "The provided recursive_sequence_lengths info is "
                     "invalid, "
                     "the LoD converted by recursive_sequence_lengths is %s",
                     new_lod));
             new (&instance) framework::Tensor(new_offset_lod);
           })
      .def("__init__",
           [](framework::Tensor &instance) {
             new (&instance) framework::Tensor();
@@ -1075,20 +1143,23 @@ PYBIND11_MODULE(core_noavx, m) {
      // avoid misuse.
      // The discussion is here:
      // https://github.com/PaddlePaddle/Paddle/issues/10855
      .def("set_lod",
           [](framework::Tensor &self,
              const std::vector<std::vector<size_t>> &lod) {
             // the input lod is offset-based level-of-detail info
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_lod, vectorize(self.dims()).front()),
                 true,
                 platform::errors::InvalidArgument(
                     "The provided LoD is invalid, the LoD is %s", new_lod));
             self.set_lod(new_lod);
           },
           py::arg("lod"),
           R"DOC(
           Set LoD of the Tensor.

           Args:
@@ -1108,28 +1179,32 @@ PYBIND11_MODULE(core_noavx, m) {
                 t.set_lod([[0, 2, 5]])
                 print(t.lod()) # [[0, 2, 5]]
           )DOC")
      .def("set_recursive_sequence_lengths",
           [](framework::Tensor &self,
              const std::vector<std::vector<size_t>>
                  &recursive_sequence_lengths) {
             // the input recursive_sequence_lengths is length-based
             // level-of-detail info
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_offset_lod, vectorize(self.dims()).front()),
                 true,
                 platform::errors::InvalidArgument(
                     "The provided recursive_sequence_lengths info is "
                     "invalid, "
                     "the LoD converted by recursive_sequence_lengths is "
                     "%s",
                     new_lod));
             self.set_lod(new_offset_lod);
           },
           py::arg("recursive_sequence_lengths"),
           R"DOC(
           Set LoD of the Tensor according to recursive sequence lengths.

           For example, if recursive_sequence_lengths=[[2, 3]], which means
@@ -1154,16 +1229,17 @@ PYBIND11_MODULE(core_noavx, m) {
                 print(t.recursive_sequence_lengths()) # [[2, 3]]
                 print(t.lod()) # [[0, 2, 5]]
           )DOC")
      .def("lod",
           [](framework::Tensor &self) -> std::vector<std::vector<size_t>> {
             // output the offset-based lod info
             LoD lod = self.lod();
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           },
           R"DOC(
           Return the LoD of the Tensor.

           Returns:
@@ -1181,16 +1257,17 @@ PYBIND11_MODULE(core_noavx, m) {
                 print(t.lod()) # [[0, 2, 5]]
           )DOC")
      // Set above comments of set_lod.
      .def("recursive_sequence_lengths",
           [](framework::Tensor &self) -> std::vector<std::vector<size_t>> {
             // output the length-based lod info
             LoD lod = phi::ConvertToLengthBasedLoD(self.lod());
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           },
           R"DOC(
           Return the recursive sequence lengths corresponding to of the LodD
           of the Tensor.
@@ -1208,13 +1285,14 @@ PYBIND11_MODULE(core_noavx, m) {
                 t.set_recursive_sequence_lengths([[2, 3]])
                 print(t.recursive_sequence_lengths()) # [[2, 3]]
           )DOC")
      .def("has_valid_recursive_sequence_lengths",
           [](framework::Tensor &self) -> bool {
             // Check that the lod info is valid and match the outermost
             // dimension of the Tensor data
             return CheckLoD(self.lod(), vectorize(self.dims()).front());
           },
           R"DOC(
           Check whether the LoD of the Tensor is valid.

           Returns:
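The Tensor bindings above (set, shape, set_lod, lod, set_recursive_sequence_lengths, has_valid_recursive_sequence_lengths) mirror the docstring examples; a combined sketch, assuming core.Tensor is re-exported as fluid.Tensor as those docstrings imply:

    # Hypothetical sketch combining the Tensor bindings documented above.
    import numpy as np
    import paddle.fluid as fluid

    t = fluid.Tensor()                               # assumed alias of core.Tensor
    t.set(np.ndarray([5, 30]), fluid.CPUPlace())     # copy a numpy array onto CPU
    print(t.shape())                                 # [5, 30]

    t.set_recursive_sequence_lengths([[2, 3]])       # length-based LoD
    print(t.recursive_sequence_lengths())            # [[2, 3]]
    print(t.lod())                                   # offset-based form: [[0, 2, 5]]
    print(t.has_valid_recursive_sequence_lengths())  # True: 2 + 3 == dims()[0]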
@@ -1594,13 +1672,15 @@ PYBIND11_MODULE(core_noavx, m) {
            new (&instance) phi::SelectedRows();
          })
      .def("__init__",
           [](phi::SelectedRows &instance,
              const std::vector<int64_t> rows,
              const int64_t &height) {
             new (&instance) phi::SelectedRows(rows, height);
           })
      .def("get_tensor",
           [](phi::SelectedRows &self) { return self.mutable_value(); },
           py::return_value_policy::reference)
      .def("numel",
           [](phi::SelectedRows &self) -> int64_t {
             return self.value().numel();
@@ -1642,11 +1722,12 @@ All parameter, weight, gradient are variables in Paddle.
      })
      .def("get_float",
           [](const Variable &var) -> float { return var.Get<float>(); })
      .def("get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
           py::return_value_policy::reference)
      .def("get_bytes",
           [](Variable &self) {
             return py::bytes(*self.GetMutable<std::string>());
@@ -1655,55 +1736,66 @@ All parameter, weight, gradient are variables in Paddle.
           [](Variable &self, Strings str_list) {
             *self.GetMutable<Strings>() = str_list;
           })
      .def("set_vocab",
           [](Variable &self, Vocab vocab) {
             *self.GetMutable<Vocab>() = vocab;
           })
      .def("get_string_tensor",
           [](Variable &self) { return self.GetMutable<Strings>(); },
           py::return_value_policy::reference)
      .def("get_map_tensor",
           [](Variable &self) { return self.GetMutable<Vocab>(); },
           py::return_value_policy::reference)
      .def("get_lod_rank_table",
           [](Variable &self) { return self.GetMutable<LoDRankTable>(); },
           py::return_value_policy::reference)
      .def("get_selected_rows",
           [](Variable &self) -> phi::SelectedRows * {
             return self.GetMutable<phi::SelectedRows>();
           },
           py::return_value_policy::reference)
      .def("get_lod_tensor_array",
           [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
           py::return_value_policy::reference)
      .def("get_fetch_list",
           [](Variable &self) { return self.GetMutable<FetchList>(); },
           py::return_value_policy::reference)
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
      .def("get_communicator",
           [](Variable &self) -> platform::Communicator * {
             return self.GetMutable<platform::Communicator>();
           },
           py::return_value_policy::reference)
#endif
      .def("get_reader",
           [](Variable &self) -> framework::ReaderHolder * {
             PADDLE_ENFORCE_EQ(
                 self.IsType<framework::ReaderHolder>(),
                 true,
                 platform::errors::InvalidArgument(
                     "The variable is not type of ReaderHolder."));
             return self.GetMutable<framework::ReaderHolder>();
           },
           py::return_value_policy::reference)
      .def("get_scope",
           [](Variable &self) -> Scope * {
             auto scope_vec =
                 self.GetMutable<std::vector<framework::Scope *>>();
             PADDLE_ENFORCE_GT(
                 scope_vec->size(),
                 0,
                 platform::errors::InvalidArgument(
                     "The size of scope_vec should be greater than 0"));
             return scope_vec->front();
           },
           py::return_value_policy::reference)
      .def("set_scope",
           [](Variable &self, Scope &scope) {
             auto scope_vec =
                 self.GetMutable<std::vector<framework::Scope *>>();
             scope_vec->emplace_back(&scope);
@@ -1736,12 +1828,13 @@ All parameter, weight, gradient are variables in Paddle.
  _Scope
      .def("_remove_from_pool",
           [](Scope &self) { ScopePool::Instance().Remove(&self); })
      .def("var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.Var(name);
           },
           py::arg("name"),
           R"DOC(
           Find or create variable named :code:`name` in the current scope.

           If the variable named :code:`name` does not exist in the
@@ -1754,8 +1847,10 @@ All parameter, weight, gradient are variables in Paddle.
           Returns:
               out (core.Variable): the found or created variable.
           )DOC",
           py::return_value_policy::reference)
      .def("find_var",
           &Scope::FindVar,
           py::arg("name"),
           R"DOC(
           Find variable named :code:`name` in the current scope or
           its parent scope. Return None if not found.
@@ -1768,7 +1863,9 @@ All parameter, weight, gradient are variables in Paddle.
           )DOC",
           py::return_value_policy::reference)
      .def("size", &Scope::Size)
      .def("erase",
           &Scope::EraseVars,
           py::arg("names"),
           R"DOC(
           Find variable named :code:`name` in the current scope or
           its parent scope. Return None if not found.
@@ -1780,33 +1877,37 @@ All parameter, weight, gradient are variables in Paddle.
               None
           )DOC",
           py::return_value_policy::reference)
      .def("new_scope",
           [](Scope &self) -> Scope * { return &self.NewScope(); },
           R"DOC(
           Create a new sub-scope of the current scope.

           Returns:
               out (core._Scope): the created sub-scope.
           )DOC",
           py::return_value_policy::reference)
      .def("drop_kids",
           &Scope::DropKids,
           R"DOC(
           Delete all sub-scopes of the current scope.
           )DOC")
      .def("_kids", &Scope::kids);

  m.def("Scope",
        []() -> Scope * {
          auto *s = new Scope();
          ScopePool::Instance().Insert(std::unique_ptr<Scope>(s));
          return s;
        },
        R"DOC(
        Create a new scope.

        Returns:
            out (core._Scope): the created scope.
        )DOC",
        py::return_value_policy::reference);

  //! @note: Be careful! PyBind will return std::string as an unicode, not
  //! Python str. If you want a str object, you should cast them in Python.
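A short sketch of the Scope bindings above (Scope(), var, find_var, new_scope, drop_kids), following the names used in the docstrings and assuming the compiled module is importable as paddle.fluid.core:

    # Hypothetical sketch of the Scope API bound above.
    from paddle.fluid import core

    scope = core.Scope()          # registered in the pool via m.def("Scope", ...)
    v = scope.var("fc_0.w_0")     # find or create a Variable in this scope
    t = v.get_tensor()            # backing LoDTensor of the variable

    child = scope.new_scope()     # sub-scope; find_var also searches parent scopes
    assert child.find_var("fc_0.w_0") is not None
    scope.drop_kids()             # delete all sub-scopes of the current scope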
@@ -1817,7 +1918,8 @@ All parameter, weight, gradient are variables in Paddle.
      if (info.HasOpProtoAndChecker()) {
        std::string str;
        PADDLE_ENFORCE_EQ(
            info.Proto().SerializeToString(&str),
            true,
            platform::errors::Fatal(
                "Serialize OpProto Error. This could be a bug of Paddle."));
        ret_values.emplace_back(str);
@@ -1838,22 +1940,24 @@ All parameter, weight, gradient are variables in Paddle.
    }
    return res;
  });
  m.def(
      "get_grad_op_desc",
      [](const OpDesc &op_desc,
         const std::unordered_set<std::string> &no_grad_set,
         const std::vector<BlockDesc *> &grad_sub_block) {
        std::unordered_map<std::string, std::string> grad_to_var;
        std::vector<std::unique_ptr<OpDesc>> grad_op_descs =
            framework::OpInfoMap::Instance()
                .Get(op_desc.Type())
                .GradOpMaker()(
                    op_desc, no_grad_set, &grad_to_var, grad_sub_block);
        std::vector<OpDesc *> grad_op_desc_ptrs(grad_op_descs.size());
        std::transform(
            grad_op_descs.begin(),
            grad_op_descs.end(),
            grad_op_desc_ptrs.begin(),
            [](std::unique_ptr<OpDesc> &p) { return p.release(); });
        return std::make_pair(grad_op_desc_ptrs, grad_to_var);
      });
  m.def("has_grad_op_maker", [](const std::string op_type) {
    return framework::OpInfoMap::Instance().Get(op_type).HasGradOpMaker();
  });
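A sketch of querying grad-op metadata through the bindings above; the operator names used here ('mul', 'fill_constant') are assumptions about what is registered in a given build:

    # Hypothetical sketch of the grad-op introspection bindings above.
    from paddle.fluid import core

    print(core.has_grad_op_maker("mul"))            # expected True: mul defines a grad op maker
    print(core.has_grad_op_maker("fill_constant"))  # typically False: no gradient needed

    # get_grad_op_desc takes an OpDesc, a no-grad set and grad sub-blocks and
    # returns (grad_op_descs, grad_to_var); it is normally driven by
    # paddle.fluid.backward rather than called directly.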
@@ -1866,7 +1970,8 @@ All parameter, weight, gradient are variables in Paddle.
    return framework::OpInfoMap::Instance().Get(op_type).HasInferInplace();
  });
  m.def("infer_no_need_buffer_slots",
        [](const std::string op_type,
           const framework::VariableNameMap &inputs,
           const framework::VariableNameMap &outputs,
           const framework::AttributeMap &attrs) {
          auto infer_func = framework::OpInfoMap::Instance()
@@ -1879,25 +1984,27 @@ All parameter, weight, gradient are variables in Paddle.
          return empty;
        }
      });
  m.def("prune",
        [](const ProgramDesc &origin,
           const std::set<std::string> &feeded_var_names,
           const std::vector<std::array<size_t, 2>> &targets) {
          ProgramDesc prog_with_targets(origin);

          for (const auto &t : targets) {
            prog_with_targets.MutableBlock(t[0])->Op(t[1])->SetIsTarget(true);
          }
          proto::ProgramDesc pruned_desc;
          auto pruned_origin_block_id_map =
              Prune(*prog_with_targets.Proto(), feeded_var_names, &pruned_desc);
          return std::make_tuple(ProgramDesc(pruned_desc),
                                 pruned_origin_block_id_map);
        });
  m.def("prune_backward",
        [](const framework::ProgramDesc &program) {
          return PruneBackward(program);
        },
        R"DOC(
             Prune the backward part of a program, mostly called in
             program.clone(for_test=True).
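As the docstring above notes, prune_backward is exercised indirectly through Program.clone(for_test=True); a hedged sketch of that public-API path, assuming the paddle.static interface of this release:

    # Hypothetical sketch: prune_backward is hit via clone(for_test=True).
    import paddle
    paddle.enable_static()

    main_prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, startup_prog):
        x = paddle.static.data(name="x", shape=[None, 8], dtype="float32")
        y = paddle.static.nn.fc(x, size=1)
        loss = paddle.mean(y)
        paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)

    # Drops backward and optimizer ops, keeping only the forward graph.
    test_prog = main_prog.clone(for_test=True)
    print(len(main_prog.block(0).ops), len(test_prog.block(0).ops))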
@@ -2040,12 +2147,12 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    device_types = phi::DeviceManager::GetAllDeviceTypes();
#else
-          LOG(WARNING) << string::Sprintf(
+          VLOG(1) << string::Sprintf(
              "Cannot use get_all_device_type because you have installed"
              "CPU/GPU version PaddlePaddle.\n"
              "If you want to use get_all_device_type, please try to install"
              "CustomDevice version "
-              "PaddlePaddle by: pip install paddlepaddle-core\n");
+              "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return device_types;
  });
@@ -2054,12 +2161,12 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
#else
-          LOG(WARNING) << string::Sprintf(
+          VLOG(1) << string::Sprintf(
              "Cannot use get_all_custom_device_type because you have installed"
              "CPU/GPU version PaddlePaddle.\n"
              "If you want to use get_all_custom_device_type, please try to "
              "install CustomDevice version "
-              "PaddlePaddle by: pip install paddlepaddle-core\n");
+              "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return device_types;
  });
@@ -2068,12 +2175,12 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    devices = phi::DeviceManager::GetAllDeviceList();
#else
-          LOG(WARNING) << string::Sprintf(
+          VLOG(1) << string::Sprintf(
              "Cannot use get_available_device because you have installed"
              "CPU/GPU version PaddlePaddle.\n"
              "If you want to use get_available_device, please try to install"
              "CustomDevice version "
-              "PaddlePaddle by: pip install paddlepaddle-core\n");
+              "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return devices;
  });
@@ -2082,18 +2189,19 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    devices = phi::DeviceManager::GetAllCustomDeviceList();
#else
-          LOG(WARNING) << string::Sprintf(
+          VLOG(1) << string::Sprintf(
              "Cannot use get_available_custom_device because you have "
              "installed"
              "CPU/GPU version PaddlePaddle.\n"
              "If you want to use get_available_custom_device, please try to "
              "install"
              "CustomDevice version "
-              "PaddlePaddle by: pip install paddlepaddle-core\n");
+              "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return devices;
  });
  py::class_<platform::CustomPlace>(m,
                                    "CustomPlace",
                                    R"DOC(
    CustomPlace is a descriptor of a device.
    It represents a custom device on which a tensor will be allocated and a model will run.
@@ -2105,7 +2213,8 @@ All parameter, weight, gradient are variables in Paddle.
        fake_cpu_place = paddle.CustomPlace("FakeCPU", 0)
        )DOC")
      .def("__init__",
           [](platform::CustomPlace &self,
              const std::string &device_type,
              int dev_id) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
             if (UNLIKELY(dev_id < 0)) {
@@ -2113,7 +2222,8 @@ All parameter, weight, gradient are variables in Paddle.
                   "Invalid CustomPlace(%s, %d), device id must be 0 "
                   "or "
                   "positive integer",
                   device_type,
                   dev_id);
               std::exit(-1);
             }
@@ -2134,7 +2244,11 @@ All parameter, weight, gradient are variables in Paddle.
                     "inside "
                     "[0, %d), because %s "
                     "number on your machine is %d",
                     device_type,
                     dev_id,
                     dev_count,
                     device_type,
                     dev_count);
                 std::exit(-1);
               }
             }
@@ -2144,7 +2258,8 @@ All parameter, weight, gradient are variables in Paddle.
                   "Invalid CustomPlace(%s, %d), the device type is "
                   "not registered "
                   "as a custom device.",
                   device_type,
                   dev_id);
               std::exit(-1);
             }
#else
@@ -2153,7 +2268,7 @@ All parameter, weight, gradient are variables in Paddle.
                 "version PaddlePaddle.\n"
                 "If you want to use CustomDevice, please try to install"
                 "CustomDevice version "
-                "PaddlePaddle by: pip install paddlepaddle-core\n"
+                "PaddlePaddle by: pip install paddlepaddle\n"
                 "If you only have CPU, please change "
                 "CustomPlace(%s, %d) to be CPUPlace().\n",
                 device_type,
                 dev_id);
@@ -2215,7 +2330,8 @@ All parameter, weight, gradient are variables in Paddle.
          LOG(ERROR) << string::Sprintf(
              "Invalid CUDAPlace(%d), must inside [0, %d), because GPU "
              "number on your machine is %d",
              dev_id,
              platform::GetGPUDeviceCount(),
              platform::GetGPUDeviceCount());
          std::exit(-1);
        }
@@ -2281,7 +2397,8 @@ All parameter, weight, gradient are variables in Paddle.
          LOG(ERROR) << string::Sprintf(
              "Invalid XPUPlace(%d), must inside [0, %d), because XPU "
              "number on your machine is %d",
              dev_id,
              platform::GetXPUDeviceCount(),
              platform::GetXPUDeviceCount());
          std::exit(-1);
        }
@@ -2446,7 +2563,8 @@ All parameter, weight, gradient are variables in Paddle.
          LOG(ERROR) << string::Sprintf(
              "Invalid NPUPlace(%d), must inside [0, %d), because NPU "
              "number on your machine is %d",
              dev_id,
              platform::GetNPUDeviceCount(),
              platform::GetNPUDeviceCount());
          std::exit(-1);
        }
@@ -2562,7 +2680,8 @@ All parameter, weight, gradient are variables in Paddle.
          LOG(ERROR) << string::Sprintf(
              "Invalid MLUPlace(%d), must inside [0, %d), because MLU "
              "number on your machine is %d",
              dev_id,
              platform::GetMLUDeviceCount(),
              platform::GetMLUDeviceCount());
          std::exit(-1);
        }
@@ -2635,8 +2754,10 @@ All parameter, weight, gradient are variables in Paddle.
      .def("mlu_device_id",
           [](platform::Place &self) { return self.device; })
      .def("custom_device_id",
           [](platform::Place &self) { return self.device; })
      .def("set_place",
           [](platform::Place &self, const platform::Place &other) {
             self = other;
           })
      .def("set_place",
           [](platform::Place &self, const platform::CPUPlace &cpu_place) {
             self = cpu_place;
@@ -2681,7 +2802,8 @@ All parameter, weight, gradient are variables in Paddle.
            true,
            platform::errors::InvalidArgument(
                "Cannot parse user input to OpDesc"));
        PADDLE_ENFORCE_EQ(
            desc.IsInitialized(),
            true,
            platform::errors::InvalidArgument(
                "The provided OpDesc is not "
                "initialized, the reason is: %s",
...
@@ -2689,37 +2811,43 @@ All parameter, weight, gradient are variables in Paddle.
        return OpRegistry::CreateOp(desc);
      })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::XPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::NPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPinnedPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::MLUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
...
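Every one of the `run` overloads above wraps the blocking C++ call in a `pybind11::gil_scoped_release` guard so other Python threads can proceed while the operator executes. Below is a minimal, self-contained sketch of that idiom, outside of Paddle; the module name `demo` and the `slow_sum` function are invented for illustration only.

// demo.cc -- sketch: drop the GIL around pure C++ work bound via pybind11.
#include <pybind11/pybind11.h>
#include <cstdint>

namespace py = pybind11;

// Pure C++ work that never touches Python objects, so it is safe to run
// without holding the GIL.
static std::uint64_t slow_sum(std::uint64_t n) {
  std::uint64_t acc = 0;
  for (std::uint64_t i = 0; i < n; ++i) acc += i;
  return acc;
}

PYBIND11_MODULE(demo, m) {
  m.def("slow_sum", [](std::uint64_t n) {
    py::gil_scoped_release release;  // other Python threads may run meanwhile
    return slow_sum(n);              // GIL is re-acquired when `release` leaves scope
  });
}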
@@ -2729,8 +2857,8 @@ All parameter, weight, gradient are variables in Paddle.
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
             return op.Outputs();
           })
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
...
@@ -2745,11 +2873,12 @@ All parameter, weight, gradient are variables in Paddle.
...
@@ -2745,11 +2873,12 @@ All parameter, weight, gradient are variables in Paddle.
py
::
class_
<
framework
::
TrainerBase
,
std
::
shared_ptr
<
framework
::
TrainerBase
>>
(
py
::
class_
<
framework
::
TrainerBase
,
std
::
shared_ptr
<
framework
::
TrainerBase
>>
(
m
,
"TrainerBase"
)
m
,
"TrainerBase"
)
.
def
(
"get_worker_scope"
,
.
def
(
[](
TrainerBase
&
self
,
int
thread_id
)
->
Scope
*
{
"get_worker_scope"
,
return
self
.
GetWorkerScope
(
thread_id
);
[](
TrainerBase
&
self
,
int
thread_id
)
->
Scope
*
{
},
return
self
.
GetWorkerScope
(
thread_id
);
py
::
return_value_policy
::
reference
)
},
py
::
return_value_policy
::
reference
)
.
def
(
"finalize"
,
&
TrainerBase
::
Finalize
)
.
def
(
"finalize"
,
&
TrainerBase
::
Finalize
)
.
def
(
"ResetDataset"
,
&
TrainerBase
::
ResetDataset
);
.
def
(
"ResetDataset"
,
&
TrainerBase
::
ResetDataset
);
...
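`get_worker_scope` returns a raw `Scope*` that the trainer keeps owning, so the binding uses `py::return_value_policy::reference` to stop Python from ever deleting it. A stand-alone sketch of the same ownership decision follows; the `Pool`/`Item` types are hypothetical stand-ins, not Paddle types.

// demo.cc -- sketch: hand out non-owning pointers with return_value_policy::reference.
#include <pybind11/pybind11.h>
#include <vector>

namespace py = pybind11;

struct Item { int value = 0; };

// Pool owns its Items for its whole lifetime; Python receives raw pointers
// and must never free them.
struct Pool {
  std::vector<Item> items{4};
  Item *at(int i) { return &items.at(i); }
};

PYBIND11_MODULE(demo, m) {
  py::class_<Item>(m, "Item").def_readwrite("value", &Item::value);
  py::class_<Pool>(m, "Pool")
      .def(py::init<>())
      // reference: no ownership transfer; reference_internal would also keep
      // the Pool alive while an Item handle exists.
      .def("at", &Pool::at, py::return_value_policy::reference);
}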
@@ -2758,13 +2887,17 @@ All parameter, weight, gradient are variables in Paddle.
  py::class_<framework::Executor>(m, "Executor")
      .def(py::init<const platform::Place &>())
      .def("close", &Executor::Close)
      .def("run_from_dataset", &Executor::RunFromDataset,
           py::call_guard<py::gil_scoped_release>())
      .def("release_trainer", &Executor::ReleaseTrainer,
           py::call_guard<py::gil_scoped_release>())
      .def("init_for_dataset",
           [](Executor &self, const ProgramDesc &prog,
              const std::string &trainer_desc, Scope *scope,
              Dataset *dataset) -> std::shared_ptr<TrainerBase> {
             pybind11::gil_scoped_release release;
             return self.InitForDataset(prog, trainer_desc, scope, dataset);
...
@@ -2775,42 +2908,64 @@ All parameter, weight, gradient are variables in Paddle.
        self.RunFromDataset(trainer);
      })
      .def("run_prepared_ctx",
           [](Executor &self, ExecutorPrepareContext *ctx, Scope *scope,
              std::map<std::string, const LoDTensor *> *feed_targets,
              std::map<std::string, FetchType *> *fetch_targets,
              bool create_local_scope = true, bool create_vars = true,
              const std::string &feed_holder_name = "feed",
              const std::string &fetch_holder_name = "fetch") {
             pybind11::gil_scoped_release release;
             self.RunPreparedContext(ctx, scope, feed_targets, fetch_targets,
                                     create_local_scope, create_vars,
                                     feed_holder_name, fetch_holder_name);
           })
      .def("run_prepared_ctx",
           [](Executor &self, ExecutorPrepareContext *ctx, Scope *scope,
              bool create_local_scope = true, bool create_vars = true,
              bool keep_kids = false) {
             pybind11::gil_scoped_release release;
             self.RunPreparedContext(ctx, scope, create_local_scope,
                                     create_vars, keep_kids);
           })
      .def("prepare",
           [](Executor &self, const ProgramDesc &program, int block_id,
              const std::vector<std::string> &skip_ref_cnt_vars =
                  std::vector<std::string>(),
              bool force_disable_gc = false) {
             pybind11::gil_scoped_release release;
             return self.Prepare(program, block_id, skip_ref_cnt_vars,
                                 force_disable_gc);
           })
      .def("create_variables", &Executor::CreateVariables)
      .def("run",
           [](Executor &self, const ProgramDesc &prog, Scope *scope,
              int block_id, bool create_local_scope, bool create_vars,
              const std::vector<std::string> &fetch_vars) {
             pybind11::gil_scoped_release release;
             self.Run(prog, scope, block_id, create_local_scope, create_vars,
                      fetch_vars);
           });

  py::class_<framework::interpreter::CostInfo>(m, "CostInfo")
      .def(py::init<>())
...
@@ -2821,8 +2976,10 @@ All parameter, weight, gradient are variables in Paddle.
...
@@ -2821,8 +2976,10 @@ All parameter, weight, gradient are variables in Paddle.
});
});
py
::
class_
<
framework
::
StandaloneExecutor
>
(
m
,
"StandaloneExecutor"
)
py
::
class_
<
framework
::
StandaloneExecutor
>
(
m
,
"StandaloneExecutor"
)
.
def
(
py
::
init
<
const
platform
::
Place
&
,
const
ProgramDesc
&
,
.
def
(
py
::
init
<
const
platform
::
Place
&
,
const
ProgramDesc
&
,
Scope
*>
())
const
ProgramDesc
&
,
const
ProgramDesc
&
,
Scope
*>
())
.
def
(
"run"
,
.
def
(
"run"
,
[](
StandaloneExecutor
&
self
,
[](
StandaloneExecutor
&
self
,
const
std
::
unordered_map
<
std
::
string
,
py
::
array
>
&
input_dict
,
const
std
::
unordered_map
<
std
::
string
,
py
::
array
>
&
input_dict
,
...
@@ -2866,11 +3023,13 @@ All parameter, weight, gradient are variables in Paddle.
        return py::cast(std::move(ret));
      })
      .def("run",
           [](StandaloneExecutor &self, std::vector<std::string> feed_names,
              std::vector<std::string> fetch_names) {
             platform::RecordEvent record_event(
                 "StandaloneExecutor:run",
                 platform::TracerEventType::UserDefined, 1);
             paddle::framework::FetchList ret;
             {
               pybind11::gil_scoped_release release;
...
@@ -2951,21 +3110,30 @@ All parameter, weight, gradient are variables in Paddle.
      });

  m.def("memory_stat_get_current", memory::StatGetCurrentValue);
  m.def("memory_stat_get_peak", memory::StatGetPeakValue);
  m.def("run_cmd",
        [](const std::string &cmd, int time_out = -1,
           int sleep_inter = -1) -> const std::string {
          return paddle::framework::shell_get_command_output(cmd, time_out,
                                                             sleep_inter);
        },
        py::arg("cmd"), py::arg("time_out") = -1, py::arg("sleep_inter") = -1);
  m.def("shell_execute_cmd",
        [](const std::string &cmd, int time_out = 0, int sleep_inter = 0,
           bool redirect_stderr = false) -> std::vector<std::string> {
          return paddle::framework::shell_execute_cmd(cmd, time_out,
                                                      sleep_inter,
                                                      redirect_stderr);
        },
        py::arg("cmd"), py::arg("time_out") = 0, py::arg("sleep_inter") = 0,
        py::arg("redirect_stderr") = false);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  m.def("is_float16_supported",
        [](const platform::CUDAPlace &place) -> bool {
...
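In `run_cmd` and `shell_execute_cmd`, the Python-visible defaults come from the trailing `py::arg(...) = value` entries, not from the `= -1` / `= 0` defaults written in the lambda's C++ signature: pybind11 does not export C++ default arguments on its own. A minimal sketch of the pattern, with a made-up `greet` function:

// demo.cc -- sketch: expose keyword arguments with defaults via py::arg.
#include <pybind11/pybind11.h>
#include <string>

namespace py = pybind11;

PYBIND11_MODULE(demo, m) {
  m.def(
      "greet",
      [](const std::string &name, int times) {
        std::string out;
        for (int i = 0; i < times; ++i) out += "hello " + name + "\n";
        return out;
      },
      py::arg("name"), py::arg("times") = 1);  // greet("world") works from Python
}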
@@ -2979,13 +3147,16 @@ All parameter, weight, gradient are variables in Paddle.
#endif

  m.def("set_feed_variable",
        static_cast<void (*)(Scope *, const LoDTensor &, const std::string &,
                             size_t)>(&framework::SetFeedVariable));
  m.def("set_feed_variable",
        static_cast<void (*)(Scope *, const Strings &, const std::string &,
                             size_t)>(&framework::SetFeedVariable));
  m.def("get_fetch_variable",
        [](const Scope &scope, const std::string &var_name,
           size_t index) -> py::object {
          auto &var = framework::GetFetchVariable(scope, var_name, index);
          if (data_is_lod_tensor(var)) {
...
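`SetFeedVariable` is an overloaded free function (one overload takes a `LoDTensor`, the other a `Strings`), so each `m.def` selects its overload explicitly with a `static_cast` to the exact function-pointer type. The same disambiguation in isolation, with a hypothetical overloaded `scale` function; `py::overload_cast` is an equivalent, more readable alternative on C++14 and later.

// demo.cc -- sketch: binding one overload of an overloaded free function.
#include <pybind11/pybind11.h>

namespace py = pybind11;

// Two overloads share a name; a bare &scale would be ambiguous.
double scale(double x) { return 2.0 * x; }
int scale(int x) { return 2 * x; }

PYBIND11_MODULE(demo, m) {
  m.def("scale_double", static_cast<double (*)(double)>(&scale));
  m.def("scale_int", static_cast<int (*)(int)>(&scale));
}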
@@ -3033,26 +3204,30 @@ All parameter, weight, gradient are variables in Paddle.
  pylodtensorarray
      .def("__init__",
           [](LoDTensorArray &instance) { new (&instance) LoDTensorArray(); })
      .def("__getitem__",
           [](LoDTensorArray &self, size_t i) { return &self.at(i); },
           py::return_value_policy::reference)
      .def("__len__", [](LoDTensorArray &self) { return self.size(); })
      .def("__setitem__",
           [](LoDTensorArray &self, size_t i, const LoDTensor &t) {
             PADDLE_ENFORCE_LT(i, self.size(),
                               platform::errors::InvalidArgument(
                                   "The index to set is larger than the size "
                                   "of LoDTensorArray."));
             self[i].ShareDataWith(t);
             self[i].set_lod(t.lod());
           })
      .def("append",
           [](LoDTensorArray &self, const LoDTensor &t) {
             self.emplace_back();
             self.back().ShareDataWith(t);
             self.back().set_lod(t.lod());
           },
           py::arg("tensor"),
           R"DOC(
             Append a LoDensor to LoDTensorArray.

             Args:
...
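The LoDTensorArray bindings give a C++ container the Python sequence protocol by hand: `__len__`, `__getitem__`, `__setitem__` and `append`. Stripped of the tensor-sharing logic, the skeleton of such a binding looks roughly as below; the `IntArray` wrapper is assumed purely for illustration.

// demo.cc -- sketch: a vector-like C++ type with Python sequence methods.
#include <pybind11/pybind11.h>
#include <vector>

namespace py = pybind11;

struct IntArray {
  std::vector<int> data;
};

PYBIND11_MODULE(demo, m) {
  py::class_<IntArray>(m, "IntArray")
      .def(py::init<>())
      .def("__len__", [](const IntArray &self) { return self.data.size(); })
      .def("__getitem__",
           // std::out_of_range from at() is translated to Python IndexError
           [](const IntArray &self, size_t i) { return self.data.at(i); })
      .def("__setitem__",
           [](IntArray &self, size_t i, int v) { self.data.at(i) = v; })
      .def("append",
           [](IntArray &self, int v) { self.data.push_back(v); },
           py::arg("value"));
}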
@@ -3072,89 +3247,94 @@ All parameter, weight, gradient are variables in Paddle.
             t.set(np.ndarray([5, 30]), fluid.CPUPlace())
             arr.append(t)
           )DOC")
      .def("_move_to_list",
           [](LoDTensorArray &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
               res[i] = py::cast(std::move(self[i]));
             }
             self.clear();
             return res;
           },
           py::return_value_policy::take_ownership);

  py::class_<FetchList>(m, "FetchList", R"DOC( FetchList is a
        vector of boost::variant<LoDTensor, LoDTensorArray>.
        )DOC")
      .def("_move_to_list",
           [](FetchList &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
               if (data_is_lod_tensor(self[i])) {
                 auto &data = BOOST_GET(LoDTensor, self[i]);
                 res[i] = py::cast(std::move(data));
               } else {
                 auto &data = BOOST_GET(LoDTensorArray, self[i]);
                 py::list tmp(data.size());
                 for (size_t j = 0; j < data.size(); ++j) {
                   tmp[j] = py::cast(std::move(data[j]));
                 }
                 res[i] = std::move(tmp);
               }
             }
             self.clear();
             return res;
           },
           py::return_value_policy::take_ownership)
      .def("append",
           [](FetchList &self, const LoDTensor &t) {
             self.emplace_back();
             auto &lod_tensor = BOOST_GET(LoDTensor, self.back());
             lod_tensor.ShareDataWith(t);
             lod_tensor.set_lod(t.lod());
           },
           py::arg("var"))
      .def("append",
           [](FetchList &self, const LoDTensorArray &t) {
             self.emplace_back();
             auto &lod_tensor_array = BOOST_GET(LoDTensorArray, self.back());
             for (size_t i = 0; i < t.size(); ++i) {
               lod_tensor_array[i].ShareDataWith(t[i]);
               lod_tensor_array[i].set_lod(t[i].lod());
             }
           },
           py::arg("var"));

  py::class_<FetchUnmergedList>(m, "FetchUnmergedList", R"DOC(
        FetchUnmergedList is 2-D array of FetchType(boost::variant(LoDTensor, LoDTensorArray)).
        )DOC")
      .def("_move_to_list",
           [](FetchUnmergedList &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
               py::list tmp(self[i].size());
               for (size_t j = 0; j < self[i].size(); ++j) {
                 if (data_is_lod_tensor(self[i][j])) {
                   auto &var = BOOST_GET(LoDTensor, self[i][j]);
                   tmp[j] = py::cast(std::move(var));
                 } else {
                   auto &var = BOOST_GET(LoDTensorArray, self[i][j]);
                   py::list tmp_array(var.size());
                   for (size_t k = 0; k < var.size(); ++k) {
                     tmp_array[k] = std::move(var[k]);
                   }
                   tmp[j] = std::move(tmp_array);
                 }
               }
               res[i] = std::move(tmp);
               self[i].clear();
             }
             self.clear();
             return res;
           },
           py::return_value_policy::take_ownership);

  m.def("op_support_gpu", OpSupportGPU);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
...
@@ -3168,11 +3348,12 @@ All parameter, weight, gradient are variables in Paddle.
        }
        platform::EmptyCache();
      });
  m.def("get_device_properties",
        [](int id) -> const gpuDeviceProp & {
          return platform::GetDeviceProperties(id);
        },
        py::return_value_policy::copy);

  py::class_<gpuDeviceProp>(m, "_gpuDeviceProperties")
      .def_property_readonly(
...
@@ -3283,18 +3464,20 @@ All parameter, weight, gradient are variables in Paddle.
  m.def("reset_profiler", platform::ResetProfiler);
  m.def("register_pass", [](const std::string &pass_type, py::object callable) {
    PADDLE_ENFORCE_EQ(
        framework::ir::PassRegistry::Instance().Has(pass_type), false,
        platform::errors::AlreadyExists("Pass '%s' is registered more than "
                                        "once. Please use another name.",
                                        pass_type));
    callable.inc_ref();
    framework::ir::PassRegistry::Instance().Insert(
        pass_type, [pass_type, callable]() {
          py::gil_scoped_acquire guard;
          std::unique_ptr<framework::ir::Pass> pass(
              new framework::ir::GeneratePass(
                  py::cast<std::string>(callable())));
          return pass;
        });
  });
  m.def("get_pass", [](const std::string &pass_type) {
    auto pass = framework::ir::PassRegistry::Instance().Get(pass_type);
...
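`register_pass` stashes a Python callable inside a C++ registry that can outlive the Python-side reference, which is why it bumps the reference count with `callable.inc_ref()` and takes `py::gil_scoped_acquire` before invoking the callable from C++. A compact sketch of those two concerns, using a hypothetical single callback slot instead of a registry:

// demo.cc -- sketch: store a Python callable in C++ and call it back safely.
#include <pybind11/pybind11.h>
#include <functional>
#include <string>

namespace py = pybind11;

// A C++-side slot that may be invoked long after register() returned,
// possibly from a thread that does not currently hold the GIL.
static std::function<std::string()> g_callback;

PYBIND11_MODULE(demo, m) {
  m.def("register", [](py::object callable) {
    callable.inc_ref();  // deliberately keep the Python object alive beyond this frame
    g_callback = [callable]() {
      py::gil_scoped_acquire guard;  // must hold the GIL before touching Python objects
      return py::cast<std::string>(callable());
    };
  });
  m.def("invoke", []() { return g_callback ? g_callback() : std::string(); });
}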
@@ -3304,7 +3487,8 @@ All parameter, weight, gradient are variables in Paddle.
  m.def("size_of_dtype", framework::SizeOfType);
  py::class_<paddle::platform::ProfilerResult>(m, "_ProfilerResult")
      .def(py::init<>())
      .def("get_data", &paddle::platform::ProfilerResult::GetData,
           py::return_value_policy::automatic_reference)
      .def("save", &paddle::platform::ProfilerResult::Save)
      .def("get_extra_info", &paddle::platform::ProfilerResult::GetExtraInfo);
...
@@ -3339,7 +3523,8 @@ All parameter, weight, gradient are variables in Paddle.
                     &paddle::platform::HostPythonNode::device_node_ptrs);
  py::class_<paddle::platform::Profiler>(m, "_Profiler")
      .def("create", &paddle::platform::Profiler::Create,
           py::return_value_policy::take_ownership)
      .def("is_cupti_supported", &paddle::platform::Profiler::IsCuptiSupported)
      .def("is_cnpapi_supported",
...
@@ -3350,12 +3535,13 @@ All parameter, weight, gradient are variables in Paddle.
        profiler->Prepare();
      })
      .def("start", &paddle::platform::Profiler::Start)
      .def("stop",
           [](paddle::platform::Profiler *profiler) {
             platform::DisableHostEventRecorder();
             return profiler->Stop();
           },
           py::return_value_policy::automatic_reference);
  py::class_<paddle::platform::ProfilerOptions>(m, "ProfilerOptions")
      .def(py::init<>())
...
@@ -3412,22 +3598,29 @@ All parameter, weight, gradient are variables in Paddle.
           [](ir::Pass &self, const std::string &name, const std::string &attr) {
             self.Set<std::string>(name, new std::string(attr));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name, bool val) {
             self.Set<bool>(name, new bool(val));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name, int val) {
             self.Set<const int>(name, new int(val));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name,
              std::vector<std::string> set) {
             self.Set(name, new std::vector<std::string>(set));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name,
              std::unordered_set<std::string> set) {
             self.Set(name, new std::unordered_set<std::string>(set));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name,
              std::unordered_set<int> set) {
             self.Set(name, new std::unordered_set<int>(set));
           })
...
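The chain of `def("set", ...)` calls registers several overloads under one Python method name; pybind11 tries them in registration order and picks the first whose parameters the Python arguments can convert to. A reduced sketch of the same overloading style, with a toy `Bag` type assumed only for the example:

// demo.cc -- sketch: overloading a bound method on its C++ argument type.
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>  // lets Python lists convert to std::vector
#include <map>
#include <string>
#include <vector>

namespace py = pybind11;

struct Bag {
  std::map<std::string, int> ints;
  std::map<std::string, std::vector<std::string>> lists;
};

PYBIND11_MODULE(demo, m) {
  py::class_<Bag>(m, "Bag")
      .def(py::init<>())
      .def("set",
           [](Bag &self, const std::string &name, int v) { self.ints[name] = v; })
      .def("set",
           [](Bag &self, const std::string &name,
              std::vector<std::string> v) { self.lists[name] = std::move(v); });
}

// From Python: Bag().set("depth", 3) hits the int overload,
// while Bag().set("names", ["a", "b"]) hits the vector overload.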
@@ -3604,11 +3797,12 @@ All parameter, weight, gradient are variables in Paddle.
          },
          R"DOC(This config that the this is distributed training with parameter server
            )DOC")
      .def_property("_dry_run",
                    [](const ExecutionStrategy &self) { return self.dry_run_; },
                    [](ExecutionStrategy &self, bool dry_run) {
                      self.dry_run_ = dry_run;
                    });

  exec_strategy.def_property(
      "use_experimental_executor",
...
@@ -3671,7 +3865,8 @@ All parameter, weight, gradient are variables in Paddle.
          "reduce_strategy",
          [](const BuildStrategy &self) { return self.reduce_; },
          [](BuildStrategy &self, BuildStrategy::ReduceStrategy strategy) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
@@ -3701,7 +3896,8 @@ All parameter, weight, gradient are variables in Paddle.
          [](const BuildStrategy &self) { return self.gradient_scale_; },
          [](BuildStrategy &self,
             BuildStrategy::GradientScaleStrategy strategy) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
@@ -3766,7 +3962,8 @@ All parameter, weight, gradient are variables in Paddle.
          "debug_graphviz_path",
          [](const BuildStrategy &self) { return self.debug_graphviz_path_; },
          [](BuildStrategy &self, const std::string &path) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
@@ -3793,7 +3990,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.enable_sequential_execution_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
@@ -3819,7 +4017,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.remove_unnecessary_lock_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
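Every BuildStrategy knob in these hunks follows one template: `def_property` with a getter lambda and a setter lambda, where the setter refuses changes once the strategy has been finalized. The skeleton, abstracted away from Paddle's error machinery (the `Config`/`finalized` names are illustrative only):

// demo.cc -- sketch: def_property with a guarded setter.
#include <pybind11/pybind11.h>
#include <stdexcept>

namespace py = pybind11;

struct Config {
  bool fuse_ops = false;
  bool finalized = false;
};

PYBIND11_MODULE(demo, m) {
  py::class_<Config>(m, "Config")
      .def(py::init<>())
      .def_property(
          "fuse_ops",
          [](const Config &self) { return self.fuse_ops; },
          [](Config &self, bool b) {
            if (self.finalized)  // reject writes after finalization, as above
              throw std::runtime_error(
                  "Config has been finalized, cannot be configured again.");
            self.fuse_ops = b;
          })
      .def("finalize", [](Config &self) { self.finalized = true; });
}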
@@ -3856,11 +4055,12 @@ All parameter, weight, gradient are variables in Paddle.
             const std::vector<std::string> &trainers_endpoints) {
            self.trainers_endpoints_ = trainers_endpoints;
          })
      .def_property("trainer_id",
                    [](const BuildStrategy &self) { return self.trainer_id_; },
                    [](BuildStrategy &self, int trainer_id) {
                      self.trainer_id_ = trainer_id;
                    })
      .def_property(
          "nccl_comm_num",
          [](const BuildStrategy &self) { return self.nccl_comm_num_; },
...
@@ -3873,20 +4073,22 @@ All parameter, weight, gradient are variables in Paddle.
          [](BuildStrategy &self, int bkcl_comm_num) {
            self.bkcl_comm_num_ = bkcl_comm_num;
          })
      .def_property("use_hierarchical_allreduce",
                    [](const BuildStrategy &self) {
                      return self.use_hierarchical_allreduce_;
                    },
                    [](BuildStrategy &self, bool use) {
                      self.use_hierarchical_allreduce_ = use;
                    })
      .def_property("hierarchical_allreduce_inter_nranks",
                    [](const BuildStrategy &self) {
                      return self.hierarchical_allreduce_inter_nranks_;
                    },
                    [](BuildStrategy &self, int nranks) {
                      self.hierarchical_allreduce_inter_nranks_ = nranks;
                    })
      .def_property(
          "fuse_elewise_add_act_ops",
...
@@ -3894,7 +4096,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.fuse_elewise_add_act_ops_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
@@ -3919,7 +4122,8 @@ All parameter, weight, gradient are variables in Paddle.
          "fuse_gemm_epilogue",
          [](const BuildStrategy &self) { return self.fuse_gemm_epilogue_; },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
@@ -3944,7 +4148,8 @@ All parameter, weight, gradient are variables in Paddle.
          "fuse_bn_act_ops",
          [](const BuildStrategy &self) { return self.fuse_bn_act_ops_; },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
@@ -3969,7 +4174,8 @@ All parameter, weight, gradient are variables in Paddle.
          "fuse_bn_add_act_ops",
          [](const BuildStrategy &self) { return self.fuse_bn_add_act_ops_; },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
@@ -3994,7 +4200,8 @@ All parameter, weight, gradient are variables in Paddle.
          "enable_auto_fusion",
          [](const BuildStrategy &self) { return self.enable_auto_fusion_; },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
@@ -4022,7 +4229,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.fuse_relu_depthwise_conv_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
@@ -4045,19 +4253,21 @@ All parameter, weight, gradient are variables in Paddle.
            build_strategy = static.BuildStrategy()
            build_strategy.fuse_relu_depthwise_conv = True
          )DOC")
      .def_property("fuse_broadcast_ops",
                    [](const BuildStrategy &self) {
                      return self.fuse_broadcast_ops_ == true ||
                             self.fuse_broadcast_ops_ == paddle::none;
                    },
                    [](BuildStrategy &self, bool b) {
                      PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                                        platform::errors::PreconditionNotMet(
                                            "BuildStrategy has been finlaized, "
                                            "cannot be configured again."));
                      self.fuse_broadcast_ops_ = b;
                    },
                    R"DOC((bool, optional): fuse_broadcast_op indicates whether
                      to fuse the broadcast ops. Note that, in Reduce mode,
                      fusing broadcast ops may make the program faster. Because
                      fusing broadcast OP equals delaying the execution of all
...
@@ -4075,23 +4285,26 @@ All parameter, weight, gradient are variables in Paddle.
            build_strategy = static.BuildStrategy()
            build_strategy.fuse_broadcast_ops = True
          )DOC")
      .def_property("fuse_all_optimizer_ops",
                    [](const BuildStrategy &self) {
                      return self.fuse_all_optimizer_ops_ == true ||
                             self.fuse_all_optimizer_ops_ == paddle::none;
                    },
                    [](BuildStrategy &self, bool b) {
                      PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                                        platform::errors::PreconditionNotMet(
                                            "BuildStrategy has been finlaized, "
                                            "cannot be configured again."));
                      self.fuse_all_optimizer_ops_ = b;
                    })
      .def_property(
          "sync_batch_norm",
          [](const BuildStrategy &self) { return self.sync_batch_norm_; },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(), true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finlaized, cannot be "
                                  "configured again."));
...
@@ -4169,9 +4382,10 @@ All parameter, weight, gradient are variables in Paddle.
            self.is_distribution_ = b;
#endif
          })
      .def_property("async_mode",
                    [](const BuildStrategy &self) { return self.async_mode_; },
                    [](BuildStrategy &self, bool b) { self.async_mode_ = b; })
      .def_property(
          "enable_inplace",
          [](const BuildStrategy &self) { return self.enable_inplace_; },
...
@@ -4187,13 +4401,14 @@ All parameter, weight, gradient are variables in Paddle.
                   self.fuse_all_reduce_ops_ == paddle::none;
          },
          [](BuildStrategy &self, bool b) { self.fuse_all_reduce_ops_ = b; })
      .def_property("enable_backward_optimizer_op_deps",
                    [](const BuildStrategy &self) {
                      return self.enable_backward_optimizer_op_deps_;
                    },
                    [](BuildStrategy &self, bool b) {
                      self.enable_backward_optimizer_op_deps_ = b;
                    })
      .def_property(
          "cache_runtime_context",
          [](const BuildStrategy &self) { return self.cache_runtime_context_; },
...
@@ -4213,24 +4428,26 @@ All parameter, weight, gradient are variables in Paddle.
          [](BuildStrategy &self, bool fix_op_run_order) {
            self.fix_op_run_order_ = fix_op_run_order;
          })
      .def_property("allow_cuda_graph_capture",
                    [](const BuildStrategy &self) {
                      return self.allow_cuda_graph_capture_;
                    },
                    [](BuildStrategy &self, bool allow_cuda_graph_capture) {
                      self.allow_cuda_graph_capture_ = allow_cuda_graph_capture;
                    })
      .def("_copy",
           [](const BuildStrategy &self) {
             auto new_bs = self;
             new_bs.ClearFinalized();
             return new_bs;
           })
      .def("_finalize_strategy_and_create_passes",
           [](BuildStrategy &self) -> std::shared_ptr<ir::PassBuilder> {
             return self.CreatePassesFromStrategy(true);
           },
           R"DOC(Allow user to customized passes. Normally model-specific
           optimization passes should be defined in this way. BuildStrategy
           cannot be updated after being finalized.)DOC");
...
@@ -4241,18 +4458,23 @@ All parameter, weight, gradient are variables in Paddle.
      });

  pe.def(py::init<const std::vector<platform::Place> &,
                  const std::vector<std::string> &, const std::string &,
                  Scope *, std::vector<Scope *> &, const ExecutionStrategy &,
                  const BuildStrategy &, ir::Graph *>())
      // NOTE: even we return a vec<Scope*>* to Python use reference policy.
      // We still cannot get local_scope from this vector, since the element
      // of vec<Scope*> will be freed by Python GC. We can only return Scope*
      // one by one and mark them as reference.
      .def("local_scopes",
           [](ParallelExecutor &self) -> std::vector<Scope *> * {
             return &self.GetLocalScopes();
           },
           py::return_value_policy::reference)
      .def("drop_local_exe_scopes", &ParallelExecutor::DropLocalExeScopes)
      .def("_need_create_local_exe_scopes",
           &ParallelExecutor::NeedCreateLocalExeScope)
...
@@ -4284,12 +4506,13 @@ All parameter, weight, gradient are variables in Paddle.
             std::unique_ptr<platform::ipu::IpuBackend, py::nodelete>>(
      m, "IpuBackend")
      // manage IpuBackend in C++
      .def("get_instance",
           []() {
             return std::unique_ptr<platform::ipu::IpuBackend, py::nodelete>(
                 platform::ipu::IpuBackend::GetInstance());
           },
           py::return_value_policy::reference)
      .def("weights_to_host", &platform::ipu::IpuBackend::WeightsToHost)
      .def("detach", &platform::ipu::IpuBackend::Detach)
      .def("reset", &platform::ipu::IpuBackend::Reset)
...
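`IpuBackend` is a process-wide singleton owned by the C++ side, so the class is registered with a `std::unique_ptr<..., py::nodelete>` holder: Python handles never run the destructor. A cut-down sketch of that holder choice; the `Backend` type and its methods are invented for the example, and `get_instance` is exposed as a static method here purely for simplicity.

// demo.cc -- sketch: binding a C++-owned singleton with py::nodelete.
#include <pybind11/pybind11.h>
#include <memory>

namespace py = pybind11;

// A singleton whose lifetime Python must not manage.
class Backend {
 public:
  static Backend *GetInstance() {
    static Backend instance;
    return &instance;
  }
  int Detach() { return 0; }

 private:
  Backend() = default;
};

PYBIND11_MODULE(demo, m) {
  py::class_<Backend, std::unique_ptr<Backend, py::nodelete>>(m, "Backend")
      // reference policy: wrap the pointer without taking ownership; the
      // nodelete holder guarantees the destructor is never called from Python.
      .def_static("get_instance", &Backend::GetInstance,
                  py::return_value_policy::reference)
      .def("detach", &Backend::Detach);
}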
@@ -4330,7 +4553,8 @@ All parameter, weight, gradient are variables in Paddle.
            PADDLE_THROW(platform::errors::Unimplemented(
                "Failed to convert type: %s when set IpuStrategy "
                "option: %s",
                option.get_type(), option_name));
          }
          self.InsertStringOption(option_name, option_val);
        }
...
@@ -4338,7 +4562,8 @@ All parameter, weight, gradient are variables in Paddle.
        if (option_name.rfind("location_", 0) == 0) {
          for (auto option : element.second.cast<py::dict>()) {
            self.SetTensorLocation(
                option_name, option.first.cast<std::string>(),
                option.second.cast<std::uint64_t>());
          }
        } else if (option_name == "accumulate_outer_fragment") {
...
@@ -4386,17 +4611,19 @@ All parameter, weight, gradient are variables in Paddle.
              PADDLE_THROW(platform::errors::Unimplemented(
                  "Failed to convert value type: %s when set "
                  "IpuStrategy option: %s",
                  option.second.get_type(), option_key));
            }
            self.InsertStringPairOption(option_name, option_key, option_val);
          }
        }
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Invalid IpuStrategy option value type: %s, please check "
            "input value for option: %s",
            element.second.get_type(), option_name));
      }
    }
  })
...