Commit 69e82d83 (unverified)
Author: ronnywang, committed via GitHub on Jun 29, 2022
Parent: dc12605d

cherry pick 43890 (#43892)

* cherry pick 43890

1 changed file with 830 additions and 603 deletions (+830 -603):

paddle/fluid/pybind/pybind.cc
@@ -372,7 +372,8 @@ static T PyObjectCast(PyObject *obj) {
  } catch (py::cast_error &) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Python object is not type of %s, the real type is %s",
        typeid(T).name(), obj->ob_type->tp_name));
  }
}
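
For readers unfamiliar with the pattern in this hunk: PyObjectCast wraps pybind11's throwing cast and turns a failed cast into a Paddle error carrying both the expected and actual type names. A minimal self-contained sketch of the same idea, using a plain std::runtime_error instead of PADDLE_THROW (the helper name CastOrThrow is ours, not Paddle's):

    #include <pybind11/pybind11.h>

    #include <stdexcept>
    #include <string>
    #include <typeinfo>

    namespace py = pybind11;

    // Sketch: cast a raw PyObject* to T, converting py::cast_error into a
    // descriptive C++ exception (hypothetical helper, not Paddle's API).
    template <typename T>
    T CastOrThrow(PyObject *obj) {
      try {
        return py::cast<T>(py::handle(obj));
      } catch (py::cast_error &) {
        throw std::runtime_error(std::string("Python object is not type of ") +
                                 typeid(T).name() + ", the real type is " +
                                 obj->ob_type->tp_name);
      }
    }
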
@@ -431,7 +432,8 @@ static std::vector<std::string> inline GetNameList(
}

static void inline CreateVariableIfNotExit(
    const py::handle &py_handle, const framework::Scope &scope,
    const framework::Executor *exe = nullptr) {
  std::vector<std::string> vec_res;
@@ -469,7 +471,8 @@ static void inline CreateVariableIfNotExit(
      PyObject *py_var_desc =
          PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kVarDescField);
      PADDLE_ENFORCE_NOT_NULL(
          py_var_desc, platform::errors::InvalidArgument(
                           "The var_desc of parameter to set is None"));
      auto var_desc = PyObjectCast<framework::VarDesc>(py_var_desc);
      Py_DECREF(py_var_desc);
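
Note the pairing in this hunk: PyObject_GetAttrString returns a new reference, so once the attribute has been cast into a VarDesc the code must drop that reference with Py_DECREF. A hedged sketch of that ownership rule in isolation:

    #include <Python.h>

    // Sketch of the new-reference rule used above (illustrative only).
    void UseAttribute(PyObject *obj) {
      PyObject *py_attr = PyObject_GetAttrString(obj, "name");  // new reference
      if (py_attr == nullptr) return;  // attribute missing; Python error is set
      /* ... consume py_attr, e.g. cast it to a C++ object ... */
      Py_DECREF(py_attr);  // balance the reference returned above
    }
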
@@ -505,7 +508,8 @@ static void AssertStaticGraphAndDygraphGradMakerNoDiff() {
    }
  }
  PADDLE_ENFORCE_EQ(ops.empty(), true,
                    platform::errors::Unimplemented(
                        "OperatorWithKernel [%s] have only static graph grad "
                        "maker or have only dygraph grad maker, which is not "
@@ -527,8 +531,10 @@ static int GetNCCLVersion() {
#endif

template <typename PlaceType>
static void TensorCopyFrom(framework::Tensor *dst,
                           const framework::Tensor &src,
                           const PlaceType &place, int64_t batch_size) {
  if (batch_size < 0) {
    framework::TensorCopy(src, place, dst);
  } else {
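
TensorCopyFrom is a single function template over the destination place; later in this diff it is instantiated once per place type, and every instantiation is registered under the same Python name `_copy_from`. A compilable sketch of that pattern with stand-in types (Tensor, CPUPlace, GPUPlace here are hypothetical stand-ins, not Paddle's classes):

    #include <pybind11/pybind11.h>

    #include <cstdint>

    namespace py = pybind11;

    struct Tensor {};    // stand-in for framework::Tensor
    struct CPUPlace {};  // stand-in place tags
    struct GPUPlace {};

    template <typename PlaceType>
    void CopyFrom(Tensor *dst, const Tensor &src, const PlaceType &place,
                  int64_t batch_size) {
      // Real code would dispatch a device-specific copy here; a negative
      // batch_size means "copy everything", as in the hunk above.
    }

    PYBIND11_MODULE(demo, m) {
      py::class_<CPUPlace>(m, "CPUPlace").def(py::init<>());
      py::class_<GPUPlace>(m, "GPUPlace").def(py::init<>());
      py::class_<Tensor>(m, "Tensor")
          .def(py::init<>())
          // One template, one Python name: pybind11 keeps both instantiations
          // as overloads and selects one from the runtime type of `place`.
          .def("_copy_from", &CopyFrom<CPUPlace>, py::arg("tensor"),
               py::arg("place"), py::arg("batch_size") = -1)
          .def("_copy_from", &CopyFrom<GPUPlace>, py::arg("tensor"),
               py::arg("place"), py::arg("batch_size") = -1);
    }
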
@@ -612,7 +618,8 @@ PYBIND11_MODULE(core_noavx, m) {
        PyCapsule_GetPointer(dltensor->ptr(), "dltensor"));
    PADDLE_ENFORCE_NOT_NULL(
        dmt, platform::errors::InvalidArgument(
                 "from_dlpack received an invalid capsule. "
                 "Note that a DLPack tensor can be consumed only once."));
@@ -632,7 +639,8 @@ PYBIND11_MODULE(core_noavx, m) {
  });

  m.def("_create_loaded_parameter",
        [](const py::handle &vec_var_list, const Scope &scope,
           const Executor *executor) {
          CreateVariableIfNotExit(vec_var_list, scope, executor);
        });
@@ -670,8 +678,9 @@ PYBIND11_MODULE(core_noavx, m) {
       << ", sci_mode=" << print_opt.sci_mode;
  });

  m.def(
      "broadcast_shape",
      [](const std::vector<int64_t> &x_dim, const std::vector<int64_t> &y_dim) {
        return phi::vectorize(operators::details::BroadcastTwoDims(
            phi::make_ddim(x_dim), phi::make_ddim(y_dim), -1));
      });
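
broadcast_shape exposes NumPy-style shape broadcasting (align shapes from the trailing dimension; sizes must be equal or one of them must be 1). A worked standalone sketch of that rule, independent of Paddle's BroadcastTwoDims:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Minimal NumPy-style broadcast of two shapes (illustrative only).
    std::vector<int64_t> BroadcastShape(std::vector<int64_t> x,
                                        std::vector<int64_t> y) {
      if (x.size() < y.size()) x.swap(y);           // make x the longer shape
      y.insert(y.begin(), x.size() - y.size(), 1);  // left-pad y with ones
      std::vector<int64_t> out(x.size());
      for (std::size_t i = 0; i < x.size(); ++i) {
        if (x[i] != y[i] && x[i] != 1 && y[i] != 1)
          throw std::invalid_argument("shapes are not broadcastable");
        out[i] = std::max(x[i], y[i]);
      }
      return out;  // e.g. {8, 1, 3} vs {4, 3} -> {8, 4, 3}
    }
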
@@ -685,7 +694,8 @@ PYBIND11_MODULE(core_noavx, m) {
  m.def("_get_use_default_grad_op_desc_maker_ops",
        [] { return OpInfoMap::Instance().GetUseDefaultGradOpDescMakerOps(); });

  m.def("_get_all_register_op_kernels",
        [](const std::string &lib) {
          std::unordered_map<std::string, std::vector<std::string>>
              all_kernels_info;
@@ -712,8 +722,7 @@ PYBIND11_MODULE(core_noavx, m) {
        for (auto &info_pair : kernel_pair.second) {
          framework::OpKernelType kernel_type =
              framework::TransPhiKernelKeyToOpKernelType(info_pair.first);
          auto kernel_type_str = framework::KernelTypeToString(kernel_type);
          if (all_kernels_info.count(op_type)) {
            if (std::find(all_kernels_info[op_type].begin(),
                          all_kernels_info[op_type].end(),
@@ -796,14 +805,22 @@ PYBIND11_MODULE(core_noavx, m) {
            self.EmplaceBackOutput(std::move(CastPyArg2Tensor(obj, 1)));
          }
        })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, bool attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, int attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, float attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, int64_t attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self, const std::string &attr) {
             self.EmplaceBackAttr(attr);
@@ -817,13 +834,14 @@ PYBIND11_MODULE(core_noavx, m) {
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self,
              const std::vector<int64_t> &attr) {
             self.EmplaceBackAttr(attr);
           })
      .def("add_attr",
           [](paddle::CustomOpKernelContext &self,
              const std::vector<std::string> &attr) {
             self.EmplaceBackAttr(attr);
           });

  py::class_<framework::Tensor> framework_tensor(m, "Tensor",
                                                 py::buffer_protocol());
  g_framework_tensor_pytype =
      reinterpret_cast<PyTypeObject *>(framework_tensor.ptr());
  framework_tensor
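
The long run of .def("add_attr", ...) calls works because pybind11 treats repeated definitions of one name as an overload set: each call appends another C++ signature, and at call time pybind11 first tries exact (no-conversion) matches in registration order, then permissive ones. That is why a Python bool lands in the bool overload even though bool is a subclass of int in Python. A minimal sketch (Ctx is a hypothetical stand-in for CustomOpKernelContext):

    #include <pybind11/pybind11.h>
    #include <pybind11/stl.h>

    #include <cstdint>
    #include <string>
    #include <vector>

    namespace py = pybind11;

    struct Ctx {
      std::vector<std::string> kinds;  // records which overload fired
    };

    PYBIND11_MODULE(demo, m) {
      py::class_<Ctx>(m, "Ctx")
          .def(py::init<>())
          .def("add_attr", [](Ctx &self, bool) { self.kinds.push_back("bool"); })
          .def("add_attr",
               [](Ctx &self, int64_t) { self.kinds.push_back("int64"); })
          .def("add_attr",
               [](Ctx &self, const std::string &) { self.kinds.push_back("str"); })
          .def("kinds", [](const Ctx &self) { return self.kinds; });
    }
    // Python: c.add_attr(True); c.add_attr(7); c.add_attr("x")
    //         c.kinds() == ["bool", "int64", "str"]
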
@@ -898,70 +916,118 @@ PYBIND11_MODULE(core_noavx, m) {
             self.mutable_data<float>(place);
           })
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::CPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::XPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::CUDAPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::CUDAPinnedPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::MLUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_clear", &framework::Tensor::clear)
      .def("_mutable_data",
           [](framework::Tensor &self, paddle::platform::NPUPlace &place,
              paddle::framework::proto::VarType::Type type) {
             return reinterpret_cast<uintptr_t>(
                 self.mutable_data(place, framework::TransToPhiDataType(type)));
           })
      .def("_copy_from", &TensorCopyFrom<paddle::platform::CPUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::XPUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::CUDAPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::NPUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::CUDAPinnedPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::MLUPlace>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("_copy_from", &TensorCopyFrom<paddle::platform::Place>,
           py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
      .def("set", SetTensorFromPyArray<paddle::platform::CPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::XPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::CUDAPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::NPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::IPUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::MLUPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
      .def("set", SetTensorFromPyArray<paddle::platform::CUDAPinnedPlace>,
           py::arg("array"), py::arg("place"), py::arg("zero_copy") = false,
           R"DOC(
        Set the data of Tensor on place with given numpy array.
@@ -985,7 +1051,8 @@ PYBIND11_MODULE(core_noavx, m) {
                t.set(np.ndarray([5, 30]), fluid.CPUPlace())
          )DOC")
      .def("shape",
           [](framework::Tensor &self) { return vectorize(self.dims()); },
           R"DOC(
           Return the shape of Tensor.
@@ -1046,9 +1113,9 @@ PYBIND11_MODULE(core_noavx, m) {
             ostr << self;
             return ostr.str();
           }) /* ------ End of original Tensor ------ */
      .def("__init__",
           [](framework::Tensor &instance,
              const std::vector<std::vector<size_t>>
                  &recursive_sequence_lengths) {
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
@@ -1057,7 +1124,8 @@ PYBIND11_MODULE(core_noavx, m) {
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_offset_lod, -1), true,
                 platform::errors::InvalidArgument(
                     "The provided recursive_sequence_lengths info is "
                     "invalid, "
@@ -1075,7 +1143,8 @@ PYBIND11_MODULE(core_noavx, m) {
      // avoid misuse.
      // The discussion is here:
      // https://github.com/PaddlePaddle/Paddle/issues/10855
      .def("set_lod",
           [](framework::Tensor &self,
              const std::vector<std::vector<size_t>> &lod) {
             // the input lod is offset-based level-of-detail info
@@ -1083,12 +1152,14 @@ PYBIND11_MODULE(core_noavx, m) {
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_lod, vectorize(self.dims()).front()), true,
                 platform::errors::InvalidArgument(
                     "The provided LoD is invalid, the LoD is %s", new_lod));
             self.set_lod(new_lod);
           },
           py::arg("lod"), R"DOC(
           Set LoD of the Tensor.

           Args:
@@ -1108,8 +1179,10 @@ PYBIND11_MODULE(core_noavx, m) {
                t.set_lod([[0, 2, 5]])
                print(t.lod()) # [[0, 2, 5]]
           )DOC")
      .def("set_recursive_sequence_lengths",
           [](framework::Tensor &self,
              const std::vector<std::vector<size_t>>
                  &recursive_sequence_lengths) {
             // the input recursive_sequence_lengths is length-based
             // level-of-detail info
@@ -1120,7 +1193,8 @@ PYBIND11_MODULE(core_noavx, m) {
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_offset_lod, vectorize(self.dims()).front()),
                 true,
                 platform::errors::InvalidArgument(
                     "The provided recursive_sequence_lengths info is "
                     "invalid, "
@@ -1129,7 +1203,8 @@ PYBIND11_MODULE(core_noavx, m) {
                     new_lod));
             self.set_lod(new_offset_lod);
           },
           py::arg("recursive_sequence_lengths"), R"DOC(
           Set LoD of the Tensor according to recursive sequence lengths.

           For example, if recursive_sequence_lengths=[[2, 3]], which means
@@ -1154,7 +1229,8 @@ PYBIND11_MODULE(core_noavx, m) {
                print(t.recursive_sequence_lengths()) # [[2, 3]]
                print(t.lod()) # [[0, 2, 5]]
           )DOC")
      .def("lod",
           [](framework::Tensor &self) -> std::vector<std::vector<size_t>> {
             // output the offset-based lod info
             LoD lod = self.lod();
@@ -1181,7 +1257,8 @@ PYBIND11_MODULE(core_noavx, m) {
                print(t.lod()) # [[0, 2, 5]]
           )DOC")
      // Set above comments of set_lod.
      .def("recursive_sequence_lengths",
           [](framework::Tensor &self) -> std::vector<std::vector<size_t>> {
             // output the length-based lod info
             LoD lod = phi::ConvertToLengthBasedLoD(self.lod());
@@ -1208,7 +1285,8 @@ PYBIND11_MODULE(core_noavx, m) {
                t.set_recursive_sequence_lengths([[2, 3]])
                print(t.recursive_sequence_lengths()) # [[2, 3]]
           )DOC")
      .def("has_valid_recursive_sequence_lengths",
           [](framework::Tensor &self) -> bool {
             // Check that the lod info is valid and match the outermost
             // dimension of the Tensor data
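
These bindings convert between the two LoD encodings shown in the docstrings: length-based info such as [[2, 3]] and offset-based info such as [[0, 2, 5]]. The conversion is a per-level prefix sum; a standalone sketch (not Paddle's ConvertToOffsetBasedLoD, but the same arithmetic):

    #include <cstddef>
    #include <utility>
    #include <vector>

    using LoD = std::vector<std::vector<std::size_t>>;

    // Lengths [[2, 3]] -> offsets [[0, 2, 5]]: prefix-sum each level.
    LoD LengthsToOffsets(const LoD &lengths) {
      LoD offsets;
      offsets.reserve(lengths.size());
      for (const auto &level : lengths) {
        std::vector<std::size_t> row{0};
        for (std::size_t len : level) row.push_back(row.back() + len);
        offsets.push_back(std::move(row));
      }
      return offsets;
    }
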
@@ -1594,11 +1672,13 @@ PYBIND11_MODULE(core_noavx, m) {
             new (&instance) phi::SelectedRows();
           })
      .def("__init__",
           [](phi::SelectedRows &instance, const std::vector<int64_t> rows,
              const int64_t &height) {
             new (&instance) phi::SelectedRows(rows, height);
           })
      .def("get_tensor",
           [](phi::SelectedRows &self) { return self.mutable_value(); },
           py::return_value_policy::reference)
      .def("numel",
@@ -1642,7 +1722,8 @@ All parameter, weight, gradient are variables in Paddle.
      })
      .def("get_float",
           [](const Variable &var) -> float { return var.Get<float>(); })
      .def("get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
@@ -1655,50 +1736,61 @@ All parameter, weight, gradient are variables in Paddle.
           [](Variable &self, Strings str_list) {
             *self.GetMutable<Strings>() = str_list;
           })
      .def("set_vocab",
           [](Variable &self, Vocab vocab) {
             *self.GetMutable<Vocab>() = vocab;
           })
      .def("get_string_tensor",
           [](Variable &self) { return self.GetMutable<Strings>(); },
           py::return_value_policy::reference)
      .def("get_map_tensor",
           [](Variable &self) { return self.GetMutable<Vocab>(); },
           py::return_value_policy::reference)
      .def("get_lod_rank_table",
           [](Variable &self) { return self.GetMutable<LoDRankTable>(); },
           py::return_value_policy::reference)
      .def("get_selected_rows",
           [](Variable &self) -> phi::SelectedRows * {
             return self.GetMutable<phi::SelectedRows>();
           },
           py::return_value_policy::reference)
      .def("get_lod_tensor_array",
           [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
           py::return_value_policy::reference)
      .def("get_fetch_list",
           [](Variable &self) { return self.GetMutable<FetchList>(); },
           py::return_value_policy::reference)
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
      .def("get_communicator",
           [](Variable &self) -> platform::Communicator * {
             return self.GetMutable<platform::Communicator>();
           },
           py::return_value_policy::reference)
#endif
      .def("get_reader",
           [](Variable &self) -> framework::ReaderHolder * {
             PADDLE_ENFORCE_EQ(
                 self.IsType<framework::ReaderHolder>(), true,
                 platform::errors::InvalidArgument(
                     "The variable is not type of ReaderHolder."));
             return self.GetMutable<framework::ReaderHolder>();
           },
           py::return_value_policy::reference)
      .def("get_scope",
           [](Variable &self) -> Scope * {
             auto scope_vec =
                 self.GetMutable<std::vector<framework::Scope *>>();
             PADDLE_ENFORCE_GT(
                 scope_vec->size(), 0,
                 platform::errors::InvalidArgument(
                     "The size of scope_vec should be greater than 0"));
             return scope_vec->front();
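
Almost every getter in this hunk is registered with py::return_value_policy::reference: Python receives a non-owning view of an object whose lifetime stays with the C++ side (the Variable), rather than a copy. A self-contained sketch of what that policy changes (Item/Store are hypothetical):

    #include <pybind11/pybind11.h>

    namespace py = pybind11;

    struct Item {
      int value = 0;
    };
    struct Store {
      Item item;
      Item *mutable_item() { return &item; }
    };

    PYBIND11_MODULE(demo, m) {
      py::class_<Item>(m, "Item").def_readwrite("value", &Item::value);
      py::class_<Store>(m, "Store")
          .def(py::init<>())
          // reference: no copy is made, Python mutates the Store's own Item,
          // and the view must not outlive the Store that owns it.
          .def("item", &Store::mutable_item, py::return_value_policy::reference);
    }
    // Python: s = Store(); s.item().value = 7; assert s.item().value == 7
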
@@ -1736,7 +1828,8 @@ All parameter, weight, gradient are variables in Paddle.
  _Scope
      .def("_remove_from_pool",
           [](Scope &self) { ScopePool::Instance().Remove(&self); })
      .def("var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.Var(name);
           },
@@ -1755,7 +1848,9 @@ All parameter, weight, gradient are variables in Paddle.
           out (core.Variable): the found or created variable.
           )DOC",
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::arg("name"),
           R"DOC(
           Find variable named :code:`name` in the current scope or
           its parent scope. Return None if not found.
@@ -1768,7 +1863,9 @@ All parameter, weight, gradient are variables in Paddle.
           )DOC",
           py::return_value_policy::reference)
      .def("size", &Scope::Size)
      .def("erase", &Scope::EraseVars, py::arg("names"),
           R"DOC(
           Find variable named :code:`name` in the current scope or
           its parent scope. Return None if not found.
@@ -1780,7 +1877,9 @@ All parameter, weight, gradient are variables in Paddle.
           None
           )DOC",
           py::return_value_policy::reference)
      .def("new_scope",
           [](Scope &self) -> Scope * { return &self.NewScope(); },
           R"DOC(
           Create a new sub-scope of the current scope.
@@ -1788,13 +1887,15 @@ All parameter, weight, gradient are variables in Paddle.
           out (core._Scope): the created sub-scope.
           )DOC",
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids,
           R"DOC(
           Delete all sub-scopes of the current scope.
           )DOC")
      .def("_kids", &Scope::kids);

  m.def("Scope",
        []() -> Scope * {
          auto *s = new Scope();
          ScopePool::Instance().Insert(std::unique_ptr<Scope>(s));
@@ -1817,7 +1918,8 @@ All parameter, weight, gradient are variables in Paddle.
    if (info.HasOpProtoAndChecker()) {
      std::string str;
      PADDLE_ENFORCE_EQ(
          info.Proto().SerializeToString(&str), true,
          platform::errors::Fatal(
              "Serialize OpProto Error. This could be a bug of Paddle."));
      ret_values.emplace_back(str);
@@ -1838,18 +1940,20 @@ All parameter, weight, gradient are variables in Paddle.
    }
    return res;
  });
  m.def(
      "get_grad_op_desc",
      [](const OpDesc &op_desc,
         const std::unordered_set<std::string> &no_grad_set,
         const std::vector<BlockDesc *> &grad_sub_block) {
        std::unordered_map<std::string, std::string> grad_to_var;
        std::vector<std::unique_ptr<OpDesc>> grad_op_descs =
            framework::OpInfoMap::Instance()
                .Get(op_desc.Type())
                .GradOpMaker()(op_desc, no_grad_set, &grad_to_var,
                               grad_sub_block);
        std::vector<OpDesc *> grad_op_desc_ptrs(grad_op_descs.size());
        std::transform(grad_op_descs.begin(), grad_op_descs.end(),
                       grad_op_desc_ptrs.begin(),
                       [](std::unique_ptr<OpDesc> &p) { return p.release(); });
        return std::make_pair(grad_op_desc_ptrs, grad_to_var);
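
get_grad_op_desc builds the grad ops as std::unique_ptr<OpDesc> and then releases them into raw pointers before returning to Python, transferring ownership out of the vector. The transform/release idiom in isolation:

    #include <algorithm>
    #include <memory>
    #include <vector>

    struct OpDesc {};  // stand-in

    // Hand ownership of every element to the caller as raw pointers; the
    // caller becomes responsible for deleting them.
    std::vector<OpDesc *> ReleaseAll(std::vector<std::unique_ptr<OpDesc>> &owned) {
      std::vector<OpDesc *> raw(owned.size());
      std::transform(owned.begin(), owned.end(), raw.begin(),
                     [](std::unique_ptr<OpDesc> &p) { return p.release(); });
      return raw;
    }
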
@@ -1866,7 +1970,8 @@ All parameter, weight, gradient are variables in Paddle.
    return framework::OpInfoMap::Instance().Get(op_type).HasInferInplace();
  });
  m.def("infer_no_need_buffer_slots",
        [](const std::string op_type, const framework::VariableNameMap &inputs,
           const framework::VariableNameMap &outputs,
           const framework::AttributeMap &attrs) {
          auto infer_func = framework::OpInfoMap::Instance()
@@ -1879,7 +1984,8 @@ All parameter, weight, gradient are variables in Paddle.
            return empty;
          }
        });
  m.def("prune",
        [](const ProgramDesc &origin,
           const std::set<std::string> &feeded_var_names,
           const std::vector<std::array<size_t, 2>> &targets) {
          ProgramDesc prog_with_targets(origin);
@@ -1893,7 +1999,8 @@ All parameter, weight, gradient are variables in Paddle.
          return std::make_tuple(ProgramDesc(pruned_desc),
                                 pruned_origin_block_id_map);
        });
  m.def("prune_backward",
        [](const framework::ProgramDesc &program) {
          return PruneBackward(program);
        },
@@ -2040,12 +2147,12 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    device_types = phi::DeviceManager::GetAllDeviceTypes();
#else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
        "Cannot use get_all_device_type because you have installed"
        "CPU/GPU version PaddlePaddle.\n"
        "If you want to use get_all_device_type, please try to install"
        "CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return device_types;
  });
@@ -2054,12 +2161,12 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
#else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
        "Cannot use get_all_custom_device_type because you have installed"
        "CPU/GPU version PaddlePaddle.\n"
        "If you want to use get_all_custom_device_type, please try to "
        "install CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return device_types;
  });
@@ -2068,12 +2175,12 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    devices = phi::DeviceManager::GetAllDeviceList();
#else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
        "Cannot use get_available_device because you have installed"
        "CPU/GPU version PaddlePaddle.\n"
        "If you want to use get_available_device, please try to install"
        "CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return devices;
  });
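
The substantive edit in these hunks demotes the "CustomDevice is not compiled in" message from LOG(WARNING), which always prints, to VLOG(1), which prints only when verbose logging is enabled, and fixes the suggested package name from paddlepaddle-core to paddlepaddle. With glog, which Paddle's logging wraps, the difference looks like this:

    #include <glog/logging.h>

    int main(int argc, char **argv) {
      google::InitGoogleLogging(argv[0]);
      LOG(WARNING) << "always emitted to the WARNING sink";
      VLOG(1) << "emitted only when verbosity >= 1 (e.g. GLOG_v=1)";
      return 0;
    }
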
@@ -2082,18 +2189,19 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    devices = phi::DeviceManager::GetAllCustomDeviceList();
#else
-    LOG(WARNING) << string::Sprintf(
+    VLOG(1) << string::Sprintf(
        "Cannot use get_available_custom_device because you have "
        "installed"
        "CPU/GPU version PaddlePaddle.\n"
        "If you want to use get_available_custom_device, please try to "
        "install"
        "CustomDevice version "
-        "PaddlePaddle by: pip install paddlepaddle-core\n");
+        "PaddlePaddle by: pip install paddlepaddle\n");
#endif
    return devices;
  });
  py::class_<platform::CustomPlace>(m, "CustomPlace",
                                    R"DOC(
    CustomPlace is a descriptor of a device.
    It represents a custom device on which a tensor will be allocated and a model will run.
@@ -2105,7 +2213,8 @@ All parameter, weight, gradient are variables in Paddle.
          fake_cpu_place = paddle.CustomPlace("FakeCPU", 0)
        )DOC")
      .def("__init__",
           [](platform::CustomPlace &self, const std::string &device_type,
              int dev_id) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
             if (UNLIKELY(dev_id < 0)) {
@@ -2113,7 +2222,8 @@ All parameter, weight, gradient are variables in Paddle.
                   "Invalid CustomPlace(%s, %d), device id must be 0 "
                   "or "
                   "positive integer",
                   device_type, dev_id);
               std::exit(-1);
             }
@@ -2134,7 +2244,11 @@ All parameter, weight, gradient are variables in Paddle.
                   "inside "
                   "[0, %d), because %s "
                   "number on your machine is %d",
                   device_type, dev_id, dev_count, device_type, dev_count);
               std::exit(-1);
             }
           }
@@ -2144,7 +2258,8 @@ All parameter, weight, gradient are variables in Paddle.
                   "Invalid CustomPlace(%s, %d), the device type is "
                   "not registered "
                   "as a custom device.",
                   device_type, dev_id);
               std::exit(-1);
             }
#else
@@ -2153,7 +2268,7 @@ All parameter, weight, gradient are variables in Paddle.
                 "version PaddlePaddle.\n"
                 "If you want to use CustomDevice, please try to install"
                 "CustomDevice version "
-                "PaddlePaddle by: pip install paddlepaddle-core\n"
+                "PaddlePaddle by: pip install paddlepaddle\n"
                 "If you only have CPU, please change "
                 "CustomPlace(%s, %d) to be CPUPlace().\n",
                 device_type, dev_id);
@@ -2215,7 +2330,8 @@ All parameter, weight, gradient are variables in Paddle.
      LOG(ERROR) << string::Sprintf(
          "Invalid CUDAPlace(%d), must inside [0, %d), because GPU "
          "number on your machine is %d",
          dev_id, platform::GetGPUDeviceCount(),
          platform::GetGPUDeviceCount());
      std::exit(-1);
    }
@@ -2281,7 +2397,8 @@ All parameter, weight, gradient are variables in Paddle.
      LOG(ERROR) << string::Sprintf(
          "Invalid XPUPlace(%d), must inside [0, %d), because XPU "
          "number on your machine is %d",
          dev_id, platform::GetXPUDeviceCount(),
          platform::GetXPUDeviceCount());
      std::exit(-1);
    }
@@ -2446,7 +2563,8 @@ All parameter, weight, gradient are variables in Paddle.
      LOG(ERROR) << string::Sprintf(
          "Invalid NPUPlace(%d), must inside [0, %d), because NPU "
          "number on your machine is %d",
          dev_id, platform::GetNPUDeviceCount(),
          platform::GetNPUDeviceCount());
      std::exit(-1);
    }
@@ -2562,7 +2680,8 @@ All parameter, weight, gradient are variables in Paddle.
      LOG(ERROR) << string::Sprintf(
          "Invalid MLUPlace(%d), must inside [0, %d), because MLU "
          "number on your machine is %d",
          dev_id, platform::GetMLUDeviceCount(),
          platform::GetMLUDeviceCount());
      std::exit(-1);
    }
@@ -2635,8 +2754,10 @@ All parameter, weight, gradient are variables in Paddle.
      .def("mlu_device_id", [](platform::Place &self) { return self.device; })
      .def("custom_device_id",
           [](platform::Place &self) { return self.device; })
      .def("set_place",
           [](platform::Place &self, const platform::Place &other) {
             self = other;
           })
      .def("set_place",
           [](platform::Place &self, const platform::CPUPlace &cpu_place) {
             self = cpu_place;
@@ -2681,7 +2802,8 @@ All parameter, weight, gradient are variables in Paddle.
          true,
          platform::errors::InvalidArgument(
              "Cannot parse user input to OpDesc"));
      PADDLE_ENFORCE_EQ(
          desc.IsInitialized(), true,
          platform::errors::InvalidArgument(
              "The provided OpDesc is not "
              "initialized, the reason is: %s",
@@ -2689,37 +2811,43 @@ All parameter, weight, gradient are variables in Paddle.
        return OpRegistry::CreateOp(desc);
      })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::XPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::NPUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPinnedPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::MLUPlace &place) {
             pybind11::gil_scoped_release release;
             self.Run(scope, place);
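
Every run overload above drops the GIL via pybind11::gil_scoped_release before calling into the C++ operator, so other Python threads can make progress while the kernel executes; the GIL is re-acquired when the guard is destroyed. A minimal sketch (HeavyWork is hypothetical):

    #include <pybind11/pybind11.h>

    #include <chrono>
    #include <thread>

    namespace py = pybind11;

    // Pure C++ work that never touches Python objects.
    void HeavyWork() { std::this_thread::sleep_for(std::chrono::seconds(1)); }

    PYBIND11_MODULE(demo, m) {
      m.def("run", [] {
        py::gil_scoped_release release;  // other Python threads may proceed
        HeavyWork();                     // must not touch Python state here
      });                                // GIL re-acquired on scope exit
    }
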
@@ -2745,7 +2873,8 @@ All parameter, weight, gradient are variables in Paddle.
  py::class_<framework::TrainerBase, std::shared_ptr<framework::TrainerBase>>(
      m, "TrainerBase")
      .def("get_worker_scope",
           [](TrainerBase &self, int thread_id) -> Scope * {
             return self.GetWorkerScope(thread_id);
           },
@@ -2758,13 +2887,17 @@ All parameter, weight, gradient are variables in Paddle.
  py::class_<framework::Executor>(m, "Executor")
      .def(py::init<const platform::Place &>())
      .def("close", &Executor::Close)
      .def("run_from_dataset", &Executor::RunFromDataset,
           py::call_guard<py::gil_scoped_release>())
      .def("release_trainer", &Executor::ReleaseTrainer,
           py::call_guard<py::gil_scoped_release>())
      .def("init_for_dataset",
           [](Executor &self, const ProgramDesc &prog,
              const std::string &trainer_desc, Scope *scope,
              Dataset *dataset) -> std::shared_ptr<TrainerBase> {
             pybind11::gil_scoped_release release;
             return self.InitForDataset(prog, trainer_desc, scope, dataset);
@@ -2775,40 +2908,62 @@ All parameter, weight, gradient are variables in Paddle.
             self.RunFromDataset(trainer);
           })
      .def("run_prepared_ctx",
           [](Executor &self, ExecutorPrepareContext *ctx, Scope *scope,
              std::map<std::string, const LoDTensor *> *feed_targets,
              std::map<std::string, FetchType *> *fetch_targets,
              bool create_local_scope = true, bool create_vars = true,
              const std::string &feed_holder_name = "feed",
              const std::string &fetch_holder_name = "fetch") {
             pybind11::gil_scoped_release release;
             self.RunPreparedContext(ctx, scope, feed_targets, fetch_targets,
                                     create_local_scope, create_vars,
                                     feed_holder_name, fetch_holder_name);
           })
      .def("run_prepared_ctx",
           [](Executor &self, ExecutorPrepareContext *ctx, Scope *scope,
              bool create_local_scope = true, bool create_vars = true,
              bool keep_kids = false) {
             pybind11::gil_scoped_release release;
             self.RunPreparedContext(ctx, scope, create_local_scope,
                                     create_vars, keep_kids);
           })
      .def("prepare",
           [](Executor &self, const ProgramDesc &program, int block_id,
              const std::vector<std::string> &skip_ref_cnt_vars =
                  std::vector<std::string>(),
              bool force_disable_gc = false) {
             pybind11::gil_scoped_release release;
             return self.Prepare(program, block_id, skip_ref_cnt_vars,
                                 force_disable_gc);
           })
      .def("create_variables", &Executor::CreateVariables)
      .def("run",
           [](Executor &self, const ProgramDesc &prog, Scope *scope,
              int block_id, bool create_local_scope, bool create_vars,
              const std::vector<std::string> &fetch_vars) {
             pybind11::gil_scoped_release release;
             self.Run(prog, scope, block_id, create_local_scope, create_vars,
                      fetch_vars);
           });
...
@@ -2821,8 +2976,10 @@ All parameter, weight, gradient are variables in Paddle.
      });

  py::class_<framework::StandaloneExecutor>(m, "StandaloneExecutor")
      .def(py::init<const platform::Place &,
                    const ProgramDesc &,
                    const ProgramDesc &,
                    Scope *>())
      .def("run",
           [](StandaloneExecutor &self,
              const std::unordered_map<std::string, py::array> &input_dict,
...
@@ -2866,11 +3023,13 @@ All parameter, weight, gradient are variables in Paddle.
             return py::cast(std::move(ret));
           })
      .def("run",
           [](StandaloneExecutor &self,
              std::vector<std::string> feed_names,
              std::vector<std::string> fetch_names) {
             platform::RecordEvent record_event(
                 "StandaloneExecutor:run",
                 platform::TracerEventType::UserDefined,
                 1);
             paddle::framework::FetchList ret;
             {
               pybind11::gil_scoped_release release;
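This overload takes feed_names and fetch_names as std::vector<std::string> by value, relying on pybind11's automatic conversion from Python lists, which is enabled by including pybind11/stl.h (presumably done elsewhere in this file). A minimal sketch under that assumption; stl_demo and Join are illustrative names, not Paddle APIs:

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>  // enables list <-> std::vector conversions

#include <string>
#include <vector>

namespace py = pybind11;

// Hypothetical helper standing in for a run(feed_names, fetch_names) API.
static std::string Join(const std::vector<std::string> &names) {
  std::string out;
  for (const auto &n : names) out += n + ";";
  return out;
}

PYBIND11_MODULE(stl_demo, m) {
  // A Python list of str arrives as a std::vector<std::string> copy, which
  // is why feed_names/fetch_names above can be taken by value.
  m.def("join", &Join);
}

Note the conversion copies the whole list on every call; that is acceptable for short name lists like these.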
...
@@ -2951,20 +3110,29 @@ All parameter, weight, gradient are variables in Paddle.
  });

  m.def("memory_stat_get_current", memory::StatGetCurrentValue);
  m.def("memory_stat_get_peak", memory::StatGetPeakValue);
  m.def(
      "run_cmd",
      [](const std::string &cmd, int time_out = -1, int sleep_inter = -1)
          -> const std::string {
        return paddle::framework::shell_get_command_output(
            cmd, time_out, sleep_inter);
      },
      py::arg("cmd"),
      py::arg("time_out") = -1,
      py::arg("sleep_inter") = -1);
  m.def(
      "shell_execute_cmd",
      [](const std::string &cmd,
         int time_out = 0,
         int sleep_inter = 0,
         bool redirect_stderr = false) -> std::vector<std::string> {
        return paddle::framework::shell_execute_cmd(
            cmd, time_out, sleep_inter, redirect_stderr);
      },
      py::arg("cmd"),
      py::arg("time_out") = 0,
      py::arg("sleep_inter") = 0,
      py::arg("redirect_stderr") = false);
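Worth noting: the default values written in the lambda signatures (int time_out = -1, and so on) are invisible to Python, because pybind11 cannot introspect C++ defaults; only the py::arg("name") = value annotations define the Python-side defaults and keyword names. A minimal sketch of the same pattern (kwargs_demo is a hypothetical module name):

#include <pybind11/pybind11.h>

#include <string>

namespace py = pybind11;

PYBIND11_MODULE(kwargs_demo, m) {
  // Hypothetical command runner mirroring the run_cmd binding above.
  m.def(
      "run_cmd",
      [](const std::string &cmd, int time_out, int sleep_inter) {
        return cmd + " time_out=" + std::to_string(time_out) +
               " sleep_inter=" + std::to_string(sleep_inter);
      },
      py::arg("cmd"),
      py::arg("time_out") = -1,
      py::arg("sleep_inter") = -1);
}

From Python this allows calls like run_cmd("ls", sleep_inter=3), with time_out falling back to -1.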
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
...
@@ -2979,13 +3147,16 @@ All parameter, weight, gradient are variables in Paddle.
#endif

  m.def("set_feed_variable",
        static_cast<void (*)(
            Scope *, const LoDTensor &, const std::string &, size_t)>(
            &framework::SetFeedVariable));
  m.def("set_feed_variable",
        static_cast<void (*)(
            Scope *, const Strings &, const std::string &, size_t)>(
            &framework::SetFeedVariable));
  m.def("get_fetch_variable",
        [](const Scope &scope,
           const std::string &var_name,
           size_t index) -> py::object {
          auto &var = framework::GetFetchVariable(scope, var_name, index);
          if (data_is_lod_tensor(var)) {
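The static_cast here is how a single C++ overload of framework::SetFeedVariable is selected: taking the address of an overloaded function is ambiguous, so the cast to an exact function-pointer type disambiguates. A minimal sketch with hypothetical stand-in functions:

#include <pybind11/pybind11.h>

#include <string>

namespace py = pybind11;

// Two C++ overloads sharing a name, as framework::SetFeedVariable does.
// These are illustrative stand-ins, not the Paddle functions.
void Feed(int /*value*/) {}
void Feed(const std::string & /*value*/) {}

PYBIND11_MODULE(overload_demo, m) {
  // &Feed alone is ambiguous; static_cast to an exact function-pointer type
  // selects one overload. The same trick is used for set_feed_variable.
  m.def("feed", static_cast<void (*)(int)>(&Feed));
  m.def("feed", static_cast<void (*)(const std::string &)>(&Feed));
}

Registering both under the same Python name then gives a single feed() that dispatches on the argument type.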
...
@@ -3033,26 +3204,30 @@ All parameter, weight, gradient are variables in Paddle.
  pylodtensorarray
      .def("__init__",
           [](LoDTensorArray &instance) { new (&instance) LoDTensorArray(); })
      .def("__getitem__",
           [](LoDTensorArray &self, size_t i) { return &self.at(i); },
           py::return_value_policy::reference)
      .def("__len__", [](LoDTensorArray &self) { return self.size(); })
      .def("__setitem__",
           [](LoDTensorArray &self, size_t i, const LoDTensor &t) {
             PADDLE_ENFORCE_LT(i,
                               self.size(),
                               platform::errors::InvalidArgument(
                                   "The index to set is larger than the size "
                                   "of LoDTensorArray."));
             self[i].ShareDataWith(t);
             self[i].set_lod(t.lod());
           })
      .def("append",
           [](LoDTensorArray &self, const LoDTensor &t) {
             self.emplace_back();
             self.back().ShareDataWith(t);
             self.back().set_lod(t.lod());
           },
           py::arg("tensor"),
           R"DOC(
             Append a LoDTensor to LoDTensorArray.

             Args:
...
@@ -3072,7 +3247,8 @@ All parameter, weight, gradient are variables in Paddle.
                t.set(np.ndarray([5, 30]), fluid.CPUPlace())
                arr.append(t)
           )DOC")
      .def("_move_to_list",
           [](LoDTensorArray &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
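The pylodtensorarray block above implements Python's sequence protocol by hand: __len__, __getitem__ returning a borrowed reference into the container, and __setitem__ with a bounds check. A minimal, self-contained sketch of the same shape; Cell and CellArray are hypothetical stand-ins for LoDTensor and LoDTensorArray:

#include <pybind11/pybind11.h>

#include <vector>

namespace py = pybind11;

// Hypothetical element and container types; only the binding pattern matters.
struct Cell {
  int value = 0;
};
using CellArray = std::vector<Cell>;

PYBIND11_MODULE(seq_demo, m) {
  py::class_<Cell>(m, "Cell")
      .def(py::init<>())
      .def_readwrite("value", &Cell::value);
  py::class_<CellArray>(m, "CellArray")
      .def(py::init<>())
      .def("__len__", [](CellArray &self) { return self.size(); })
      // Return a pointer into the container; the reference policy means
      // Python borrows the element instead of copying or owning it.
      .def("__getitem__",
           [](CellArray &self, size_t i) { return &self.at(i); },
           py::return_value_policy::reference)
      .def("__setitem__",
           [](CellArray &self, size_t i, const Cell &c) {
             if (i >= self.size()) {
               throw py::index_error(
                   "The index to set is larger than the size of the array.");
             }
             self[i] = c;
           })
      .def("append", [](CellArray &self, const Cell &c) { self.push_back(c); });
}

The borrowed reference from __getitem__ is only valid while the array is alive and unresized, the same caveat that applies to the LoDTensorArray binding.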
...
@@ -3086,7 +3262,8 @@ All parameter, weight, gradient are variables in Paddle.
  py::class_<FetchList>(m, "FetchList", R"DOC( FetchList is a
        vector of boost::variant<LoDTensor, LoDTensorArray>.
        )DOC")
      .def("_move_to_list",
           [](FetchList &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
...
@@ -3107,7 +3284,8 @@ All parameter, weight, gradient are variables in Paddle.
           },
           py::return_value_policy::take_ownership)
      .def("append",
           [](FetchList &self, const LoDTensor &t) {
             self.emplace_back();
             auto &lod_tensor = BOOST_GET(LoDTensor, self.back());
...
@@ -3116,7 +3294,8 @@ All parameter, weight, gradient are variables in Paddle.
           },
           py::arg("var"))
      .def("append",
           [](FetchList &self, const LoDTensorArray &t) {
             self.emplace_back();
             auto &lod_tensor_array = BOOST_GET(LoDTensorArray, self.back());
...
@@ -3130,7 +3309,8 @@ All parameter, weight, gradient are variables in Paddle.
  py::class_<FetchUnmergedList>(m, "FetchUnmergedList", R"DOC(
        FetchUnmergedList is 2-D array of FetchType(boost::variant(LoDTensor, LoDTensorArray)).
        )DOC")
      .def("_move_to_list",
           [](FetchUnmergedList &self) -> py::list {
             py::list res(self.size());
             for (size_t i = 0; i < self.size(); ++i) {
...
@@ -3168,7 +3348,8 @@ All parameter, weight, gradient are variables in Paddle.
    }
    platform::EmptyCache();
  });
  m.def("get_device_properties",
        [](int id) -> const gpuDeviceProp & {
          return platform::GetDeviceProperties(id);
        },
...
@@ -3283,16 +3464,18 @@ All parameter, weight, gradient are variables in Paddle.
  m.def("reset_profiler", platform::ResetProfiler);
  m.def("register_pass",
        [](const std::string &pass_type, py::object callable) {
          PADDLE_ENFORCE_EQ(
              framework::ir::PassRegistry::Instance().Has(pass_type),
              false,
              platform::errors::AlreadyExists(
                  "Pass '%s' is registered more than "
                  "once. Please use another name.",
                  pass_type));
          callable.inc_ref();
          framework::ir::PassRegistry::Instance().Insert(
              pass_type, [pass_type, callable]() {
                py::gil_scoped_acquire guard;
                std::unique_ptr<framework::ir::Pass> pass(
                    new framework::ir::GeneratePass(
                        py::cast<std::string>(callable())));
                return pass;
              });
        });
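Two details in register_pass are easy to miss: callable.inc_ref() deliberately leaks one reference so the Python callable outlives the binding scope (the C++ registry holds it indefinitely), and the stored lambda reacquires the GIL with py::gil_scoped_acquire because the registry may invoke it from a context that does not hold the GIL. A minimal sketch of both, with a hypothetical registry standing in for framework::ir::PassRegistry:

#include <pybind11/pybind11.h>

#include <functional>
#include <map>
#include <string>

namespace py = pybind11;

// Hypothetical C++-side registry, standing in for framework::ir::PassRegistry.
static std::map<std::string, std::function<std::string()>> g_registry;

PYBIND11_MODULE(registry_demo, m) {
  m.def("register", [](const std::string &name, py::object callable) {
    // Deliberately leak one reference so the callable stays alive for as
    // long as the C++ registry does, mirroring callable.inc_ref() above.
    callable.inc_ref();
    g_registry[name] = [callable]() {
      // The factory may run on a thread that does not hold the GIL, so it
      // must be reacquired before calling into Python.
      py::gil_scoped_acquire guard;
      return py::cast<std::string>(callable());
    };
  });
  m.def("invoke",
        [](const std::string &name) { return g_registry.at(name)(); });
}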
...
@@ -3304,7 +3487,8 @@ All parameter, weight, gradient are variables in Paddle.
  m.def("size_of_dtype", framework::SizeOfType);
  py::class_<paddle::platform::ProfilerResult>(m, "_ProfilerResult")
      .def(py::init<>())
      .def("get_data",
           &paddle::platform::ProfilerResult::GetData,
           py::return_value_policy::automatic_reference)
      .def("save", &paddle::platform::ProfilerResult::Save)
      .def("get_extra_info", &paddle::platform::ProfilerResult::GetExtraInfo);
...
@@ -3339,7 +3523,8 @@ All parameter, weight, gradient are variables in Paddle.
                     &paddle::platform::HostPythonNode::device_node_ptrs);
  py::class_<paddle::platform::Profiler>(m, "_Profiler")
      .def("create",
           &paddle::platform::Profiler::Create,
           py::return_value_policy::take_ownership)
      .def("is_cupti_supported", &paddle::platform::Profiler::IsCuptiSupported)
      .def("is_cnpapi_supported",
...
@@ -3350,7 +3535,8 @@ All parameter, weight, gradient are variables in Paddle.
             profiler->Prepare();
           })
      .def("start", &paddle::platform::Profiler::Start)
      .def("stop",
           [](paddle::platform::Profiler *profiler) {
             platform::DisableHostEventRecorder();
             return profiler->Stop();
...
@@ -3412,22 +3598,29 @@ All parameter, weight, gradient are variables in Paddle.
           [](ir::Pass &self,
              const std::string &name,
              const std::string &attr) {
             self.Set<std::string>(name, new std::string(attr));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name, bool val) {
             self.Set<bool>(name, new bool(val));
           })
      .def("set",
           [](ir::Pass &self, const std::string &name, int val) {
             self.Set<const int>(name, new int(val));
           })
      .def("set",
           [](ir::Pass &self,
              const std::string &name,
              std::vector<std::string> set) {
             self.Set(name, new std::vector<std::string>(set));
           })
      .def("set",
           [](ir::Pass &self,
              const std::string &name,
              std::unordered_set<std::string> set) {
             self.Set(name, new std::unordered_set<std::string>(set));
           })
      .def("set",
           [](ir::Pass &self,
              const std::string &name,
              std::unordered_set<int> set) {
             self.Set(name, new std::unordered_set<int>(set));
           })
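These same-name .def("set", ...) overloads work because pybind11 tries overloads in registration order, first without implicit conversions and then with them; registering the bool overload before the int overload matters, since a Python bool would otherwise match int. A minimal sketch of the dispatch, with a hypothetical Bag type standing in for ir::Pass:

#include <pybind11/pybind11.h>

#include <string>

namespace py = pybind11;

// Stand-in for ir::Pass: records which overload handled the last call.
struct Bag {
  std::string repr;
};

PYBIND11_MODULE(overload_set_demo, m) {
  py::class_<Bag>(m, "Bag")
      .def(py::init<>())
      .def_readonly("repr", &Bag::repr)
      // Overloads are tried in this order: bool, then int, then str.
      .def("set", [](Bag &self, const std::string &name, bool v) {
        self.repr = name + ":bool=" + (v ? "1" : "0");
      })
      .def("set", [](Bag &self, const std::string &name, int v) {
        self.repr = name + ":int=" + std::to_string(v);
      })
      .def("set", [](Bag &self, const std::string &name,
                     const std::string &v) { self.repr = name + ":str=" + v; });
}

From Python, bag.set("x", True), bag.set("x", 3), and bag.set("x", "y") each land on a different C++ lambda.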
...
@@ -3604,7 +3797,8 @@ All parameter, weight, gradient are variables in Paddle.
          },
          R"DOC(This config indicates whether this is distributed training with a parameter server.
              )DOC")
      .def_property("_dry_run",
                    [](const ExecutionStrategy &self) { return self.dry_run_; },
                    [](ExecutionStrategy &self, bool dry_run) {
                      self.dry_run_ = dry_run;
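The def_property pattern used here and throughout the BuildStrategy bindings below exposes a C++ member as a Python attribute through a getter/setter lambda pair; the setter is also where preconditions are enforced, as the PADDLE_ENFORCE_NE(IsFinalized(), ...) checks do. A minimal sketch with a hypothetical Strategy type:

#include <pybind11/pybind11.h>

#include <stdexcept>

namespace py = pybind11;

// Stand-in for BuildStrategy: a flag that may only change before finalize().
struct Strategy {
  bool debug_ = false;
  bool finalized_ = false;
};

PYBIND11_MODULE(prop_demo, m) {
  py::class_<Strategy>(m, "Strategy")
      .def(py::init<>())
      .def("finalize", [](Strategy &self) { self.finalized_ = true; })
      // def_property routes attribute reads through the getter and writes
      // through the setter, so the setter can reject late configuration.
      .def_property(
          "debug",
          [](const Strategy &self) { return self.debug_; },
          [](Strategy &self, bool b) {
            if (self.finalized_) {
              throw std::runtime_error(
                  "Strategy has been finalized, cannot be configured again.");
            }
            self.debug_ = b;
          });
}

pybind11 translates the std::runtime_error into a Python RuntimeError, much as PADDLE_ENFORCE raises a Paddle error on the Python side.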
...
@@ -3671,7 +3865,8 @@ All parameter, weight, gradient are variables in Paddle.
"reduce_strategy"
,
"reduce_strategy"
,
[](
const
BuildStrategy
&
self
)
{
return
self
.
reduce_
;
},
[](
const
BuildStrategy
&
self
)
{
return
self
.
reduce_
;
},
[](
BuildStrategy
&
self
,
BuildStrategy
::
ReduceStrategy
strategy
)
{
[](
BuildStrategy
&
self
,
BuildStrategy
::
ReduceStrategy
strategy
)
{
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
platform
::
errors
::
PreconditionNotMet
(
platform
::
errors
::
PreconditionNotMet
(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finlaized, cannot be "
"configured again."
));
"configured again."
));
...
@@ -3701,7 +3896,8 @@ All parameter, weight, gradient are variables in Paddle.
          [](const BuildStrategy &self) { return self.gradient_scale_; },
          [](BuildStrategy &self,
             BuildStrategy::GradientScaleStrategy strategy) {
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
@@ -3766,7 +3962,8 @@ All parameter, weight, gradient are variables in Paddle.
"debug_graphviz_path"
,
"debug_graphviz_path"
,
[](
const
BuildStrategy
&
self
)
{
return
self
.
debug_graphviz_path_
;
},
[](
const
BuildStrategy
&
self
)
{
return
self
.
debug_graphviz_path_
;
},
[](
BuildStrategy
&
self
,
const
std
::
string
&
path
)
{
[](
BuildStrategy
&
self
,
const
std
::
string
&
path
)
{
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
platform
::
errors
::
PreconditionNotMet
(
platform
::
errors
::
PreconditionNotMet
(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finlaized, cannot be "
"configured again."
));
"configured again."
));
...
@@ -3793,7 +3990,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.enable_sequential_execution_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
@@ -3819,7 +4017,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.remove_unnecessary_lock_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
@@ -3856,7 +4055,8 @@ All parameter, weight, gradient are variables in Paddle.
             const std::vector<std::string> &trainers_endpoints) {
            self.trainers_endpoints_ = trainers_endpoints;
          })
      .def_property("trainer_id",
                    [](const BuildStrategy &self) { return self.trainer_id_; },
                    [](BuildStrategy &self, int trainer_id) {
                      self.trainer_id_ = trainer_id;
...
@@ -3873,14 +4073,16 @@ All parameter, weight, gradient are variables in Paddle.
                    [](BuildStrategy &self, int bkcl_comm_num) {
                      self.bkcl_comm_num_ = bkcl_comm_num;
                    })
      .def_property("use_hierarchical_allreduce",
                    [](const BuildStrategy &self) {
                      return self.use_hierarchical_allreduce_;
                    },
                    [](BuildStrategy &self, bool use) {
                      self.use_hierarchical_allreduce_ = use;
                    })
      .def_property("hierarchical_allreduce_inter_nranks",
                    [](const BuildStrategy &self) {
                      return self.hierarchical_allreduce_inter_nranks_;
                    },
...
@@ -3894,7 +4096,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.fuse_elewise_add_act_ops_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
@@ -3919,7 +4122,8 @@ All parameter, weight, gradient are variables in Paddle.
"fuse_gemm_epilogue"
,
"fuse_gemm_epilogue"
,
[](
const
BuildStrategy
&
self
)
{
return
self
.
fuse_gemm_epilogue_
;
},
[](
const
BuildStrategy
&
self
)
{
return
self
.
fuse_gemm_epilogue_
;
},
[](
BuildStrategy
&
self
,
bool
b
)
{
[](
BuildStrategy
&
self
,
bool
b
)
{
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
platform
::
errors
::
PreconditionNotMet
(
platform
::
errors
::
PreconditionNotMet
(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finlaized, cannot be "
"configured again."
));
"configured again."
));
...
@@ -3944,7 +4148,8 @@ All parameter, weight, gradient are variables in Paddle.
"fuse_bn_act_ops"
,
"fuse_bn_act_ops"
,
[](
const
BuildStrategy
&
self
)
{
return
self
.
fuse_bn_act_ops_
;
},
[](
const
BuildStrategy
&
self
)
{
return
self
.
fuse_bn_act_ops_
;
},
[](
BuildStrategy
&
self
,
bool
b
)
{
[](
BuildStrategy
&
self
,
bool
b
)
{
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
platform
::
errors
::
PreconditionNotMet
(
platform
::
errors
::
PreconditionNotMet
(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finlaized, cannot be "
"configured again."
));
"configured again."
));
...
@@ -3969,7 +4174,8 @@ All parameter, weight, gradient are variables in Paddle.
"fuse_bn_add_act_ops"
,
"fuse_bn_add_act_ops"
,
[](
const
BuildStrategy
&
self
)
{
return
self
.
fuse_bn_add_act_ops_
;
},
[](
const
BuildStrategy
&
self
)
{
return
self
.
fuse_bn_add_act_ops_
;
},
[](
BuildStrategy
&
self
,
bool
b
)
{
[](
BuildStrategy
&
self
,
bool
b
)
{
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
platform
::
errors
::
PreconditionNotMet
(
platform
::
errors
::
PreconditionNotMet
(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finlaized, cannot be "
"configured again."
));
"configured again."
));
...
@@ -3994,7 +4200,8 @@ All parameter, weight, gradient are variables in Paddle.
"enable_auto_fusion"
,
"enable_auto_fusion"
,
[](
const
BuildStrategy
&
self
)
{
return
self
.
enable_auto_fusion_
;
},
[](
const
BuildStrategy
&
self
)
{
return
self
.
enable_auto_fusion_
;
},
[](
BuildStrategy
&
self
,
bool
b
)
{
[](
BuildStrategy
&
self
,
bool
b
)
{
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
platform
::
errors
::
PreconditionNotMet
(
platform
::
errors
::
PreconditionNotMet
(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finlaized, cannot be "
"configured again."
));
"configured again."
));
...
@@ -4022,7 +4229,8 @@ All parameter, weight, gradient are variables in Paddle.
            return self.fuse_relu_depthwise_conv_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, cannot be "
                                  "configured again."));
...
@@ -4045,13 +4253,15 @@ All parameter, weight, gradient are variables in Paddle.
                build_strategy = static.BuildStrategy()
                build_strategy.fuse_relu_depthwise_conv = True
          )DOC")
      .def_property(
          "fuse_broadcast_ops",
          [](const BuildStrategy &self) {
            return self.fuse_broadcast_ops_ == true ||
                   self.fuse_broadcast_ops_ == paddle::none;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, "
                                  "cannot be configured again."));
...
@@ -4075,13 +4285,15 @@ All parameter, weight, gradient are variables in Paddle.
                build_strategy = static.BuildStrategy()
                build_strategy.fuse_broadcast_ops = True
          )DOC")
      .def_property(
          "fuse_all_optimizer_ops",
          [](const BuildStrategy &self) {
            return self.fuse_all_optimizer_ops_ == true ||
                   self.fuse_all_optimizer_ops_ == paddle::none;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE_NE(self.IsFinalized(),
                              true,
                              platform::errors::PreconditionNotMet(
                                  "BuildStrategy has been finalized, "
                                  "cannot be configured again."));
...
@@ -4091,7 +4303,8 @@ All parameter, weight, gradient are variables in Paddle.
"sync_batch_norm"
,
"sync_batch_norm"
,
[](
const
BuildStrategy
&
self
)
{
return
self
.
sync_batch_norm_
;
},
[](
const
BuildStrategy
&
self
)
{
return
self
.
sync_batch_norm_
;
},
[](
BuildStrategy
&
self
,
bool
b
)
{
[](
BuildStrategy
&
self
,
bool
b
)
{
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
PADDLE_ENFORCE_NE
(
self
.
IsFinalized
(),
true
,
platform
::
errors
::
PreconditionNotMet
(
platform
::
errors
::
PreconditionNotMet
(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finlaized, cannot be "
"configured again."
));
"configured again."
));
...
@@ -4169,7 +4382,8 @@ All parameter, weight, gradient are variables in Paddle.
            self.is_distribution_ = b;
#endif
          })
      .def_property("async_mode",
                    [](const BuildStrategy &self) { return self.async_mode_; },
                    [](BuildStrategy &self, bool b) { self.async_mode_ = b; })
      .def_property(
...
@@ -4187,7 +4401,8 @@ All parameter, weight, gradient are variables in Paddle.
                   self.fuse_all_reduce_ops_ == paddle::none;
          },
          [](BuildStrategy &self, bool b) { self.fuse_all_reduce_ops_ = b; })
      .def_property("enable_backward_optimizer_op_deps",
                    [](const BuildStrategy &self) {
                      return self.enable_backward_optimizer_op_deps_;
                    },
...
@@ -4213,7 +4428,8 @@ All parameter, weight, gradient are variables in Paddle.
          [](BuildStrategy &self, bool fix_op_run_order) {
            self.fix_op_run_order_ = fix_op_run_order;
          })
      .def_property("allow_cuda_graph_capture",
                    [](const BuildStrategy &self) {
                      return self.allow_cuda_graph_capture_;
                    },
...
@@ -4226,7 +4442,8 @@ All parameter, weight, gradient are variables in Paddle.
            new_bs.ClearFinalized();
            return new_bs;
          })
      .def("_finalize_strategy_and_create_passes",
           [](BuildStrategy &self) -> std::shared_ptr<ir::PassBuilder> {
             return self.CreatePassesFromStrategy(true);
           },
...
@@ -4241,14 +4458,19 @@ All parameter, weight, gradient are variables in Paddle.
  });

  pe.def(py::init<const std::vector<platform::Place> &,
                  const std::vector<std::string> &,
                  const std::string &,
                  Scope *,
                  std::vector<Scope *> &,
                  const ExecutionStrategy &,
                  const BuildStrategy &,
                  ir::Graph *>())
      // NOTE: even if we return a vec<Scope*>* to Python with reference
      // policy, we still cannot get local_scope from this vector, since the
      // elements of vec<Scope*> will be freed by Python GC. We can only
      // return Scope* one by one and mark them as references.
      .def("local_scopes",
           [](ParallelExecutor &self) -> std::vector<Scope *> * {
             return &self.GetLocalScopes();
           },
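The NOTE above is about ownership: py::return_value_policy::reference tells pybind11 that Python borrows the object and must not delete it, but that guarantee applies only to the object actually returned, not to pointers stored inside a returned container. A minimal sketch of the safe per-element pattern, with hypothetical Owner/Scope stand-ins:

#include <pybind11/pybind11.h>

#include <vector>

namespace py = pybind11;

struct Scope {
  int id = 0;
};

// Owner keeps scopes alive in C++; Python only ever sees borrowed pointers.
struct Owner {
  std::vector<Scope> scopes{Scope{1}, Scope{2}};
};

PYBIND11_MODULE(ref_demo, m) {
  py::class_<Scope>(m, "Scope").def_readonly("id", &Scope::id);
  py::class_<Owner>(m, "Owner")
      .def(py::init<>())
      .def("scope_count", [](Owner &self) { return self.scopes.size(); })
      // Return one Scope* at a time, marked as a reference so Python never
      // takes ownership -- the same reasoning as the local_scopes NOTE.
      .def("scope",
           [](Owner &self, size_t i) { return &self.scopes.at(i); },
           py::return_value_policy::reference);
}

The borrowed Scope handles are valid only while the Owner is alive, which mirrors how ParallelExecutor must outlive the scopes it hands out.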
...
@@ -4284,7 +4506,8 @@ All parameter, weight, gradient are variables in Paddle.
             std::unique_ptr<platform::ipu::IpuBackend, py::nodelete>>(
      m, "IpuBackend")
      // manage IpuBackend in C++
      .def("get_instance",
           []() {
             return std::unique_ptr<platform::ipu::IpuBackend, py::nodelete>(
                 platform::ipu::IpuBackend::GetInstance());
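The std::unique_ptr<T, py::nodelete> holder used for IpuBackend is pybind11's idiom for C++-owned singletons: Python gets a handle but never deletes the object. A minimal sketch (the Backend type and nodelete_demo module are hypothetical; it also uses def_static, whereas the hunk above binds get_instance with a plain def):

#include <pybind11/pybind11.h>

#include <memory>

namespace py = pybind11;

// Hypothetical C++-owned singleton, standing in for platform::ipu::IpuBackend.
class Backend {
 public:
  static Backend *GetInstance() {
    static Backend instance;
    return &instance;
  }
  int Ping() { return 42; }

 private:
  Backend() = default;
};

PYBIND11_MODULE(nodelete_demo, m) {
  // The std::unique_ptr<T, py::nodelete> holder tells pybind11 never to
  // delete the object: its lifetime is managed entirely on the C++ side.
  py::class_<Backend, std::unique_ptr<Backend, py::nodelete>>(m, "Backend")
      .def_static("get_instance",
                  []() {
                    return std::unique_ptr<Backend, py::nodelete>(
                        Backend::GetInstance());
                  })
      .def("ping", &Backend::Ping);
}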
...
@@ -4330,7 +4553,8 @@ All parameter, weight, gradient are variables in Paddle.
              PADDLE_THROW(platform::errors::Unimplemented(
                  "Failed to convert type: %s when set IpuStrategy "
                  "option: %s",
                  option.get_type(),
                  option_name));
            }
            self.InsertStringOption(option_name, option_val);
          }
...
@@ -4338,7 +4562,8 @@ All parameter, weight, gradient are variables in Paddle.
          if (option_name.rfind("location_", 0) == 0) {
            for (auto option : element.second.cast<py::dict>()) {
              self.SetTensorLocation(option_name,
                                     option.first.cast<std::string>(),
                                     option.second.cast<std::uint64_t>());
            }
          } else if (option_name == "accumulate_outer_fragment") {
...
@@ -4386,17 +4611,19 @@ All parameter, weight, gradient are variables in Paddle.
                PADDLE_THROW(platform::errors::Unimplemented(
                    "Failed to convert value type: %s when set "
                    "IpuStrategy option: %s",
                    option.second.get_type(),
                    option_key));
              }
              self.InsertStringPairOption(option_name, option_key, option_val);
            }
          }
        } else {
          PADDLE_THROW(platform::errors::InvalidArgument(
              "Invalid IpuStrategy option value type: %s, please check "
              "input value for option: %s",
              element.second.get_type(),
              option_name));
        }
      })
...