Unverified commit 5988d0c0
Authored by Yu Yang on Jun 29, 2018; committed by GitHub on Jun 29, 2018
Merge pull request #11733 from reyoung/feature/refactor_op
Refactor Operator.cc, and clean code
Parents: 02e521e3, 5e23a5ec
Showing 6 changed files with 112 additions and 60 deletions (+112, -60)
doc/fluid/design/multi_devices/kernel_selection.md   +2   -2
paddle/fluid/framework/data_transform.cc             +10  -10
paddle/fluid/framework/data_transform.h              +9   -6
paddle/fluid/framework/op_kernel_type.h              +1   -1
paddle/fluid/framework/operator.cc                   +76  -41
paddle/fluid/framework/operator.h                    +14  -0
doc/fluid/design/multi_devices/kernel_selection.md

@@ -74,10 +74,10 @@ void OperatorWithKernel::Run(
       auto kernel_type_for_var = this->GetKernelTypeForVar(...);
       if (kernel_type_for_var.place_ != expected_kernel_key.place_) {
         auto* trans_var = new_scope.Var(var_name);
-        auto* out = DataTransform(expected_kernel_key,
+        auto* out = TransformData(expected_kernel_key,
                                   kernel_type_for_var, *tensor_in);
-        CopyVariableWithTensor(...);
+        SetTensorToVariable(...);
       }
     }
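The doc change only tracks the renames (DataTransform becomes TransformData, CopyVariableWithTensor becomes SetTensorToVariable). As a rough illustration of the per-variable flow the design doc describes, here is a minimal, self-contained sketch; every type and signature in it (Place, Tensor, KernelKey, MockScope, and the simplified TransformData/SetTensorToVariable) is a stand-in for illustration, not the real Paddle API.

    // Sketch of the branch above: if the variable's kernel type lives on a
    // different place than the expected kernel key, transform the tensor and
    // install the result into a variable in the transfer scope.
    #include <iostream>
    #include <map>
    #include <string>

    enum class Place { kCPU, kGPU };

    struct Tensor {
      Place place = Place::kCPU;
    };

    struct KernelKey {
      Place place;
    };

    struct Variable {
      Tensor tensor;
    };

    struct MockScope {
      std::map<std::string, Variable> vars;
      Variable* Var(const std::string& name) { return &vars[name]; }
    };

    // Stand-in for TransformData: produce a tensor on the expected place.
    Tensor TransformData(const KernelKey& expected, const Tensor& in) {
      Tensor out = in;
      out.place = expected.place;  // pretend the data was copied across devices
      return out;
    }

    // Stand-in for SetTensorToVariable: install the transformed tensor.
    void SetTensorToVariable(const Tensor& t, Variable* out_var) {
      out_var->tensor = t;
    }

    int main() {
      KernelKey expected_kernel_key{Place::kGPU};
      Tensor input;  // lives on CPU by default
      KernelKey kernel_type_for_var{input.place};

      MockScope new_scope;
      if (kernel_type_for_var.place != expected_kernel_key.place) {
        auto* trans_var = new_scope.Var("X");
        Tensor out = TransformData(expected_kernel_key, input);
        SetTensorToVariable(out, trans_var);
      }
      std::cout << "transformed vars: " << new_scope.vars.size() << "\n";
    }
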
paddle/fluid/framework/data_transform.cc

@@ -21,14 +21,14 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
-static void PassTensorData(Tensor* from, Tensor* to) {
+static void PassTensorData(Tensor *from, Tensor *to) {
   to->ShareDataWith(*from);
   *from = Tensor();
 }
 
-void DataTransform(const OpKernelType& expected_kernel_type,
-                   const OpKernelType& kernel_type_for_var,
-                   const Tensor& input_tensor, Tensor* output_tensor) {
+void TransformData(const OpKernelType &expected_kernel_type,
+                   const OpKernelType &kernel_type_for_var,
+                   const Tensor &input_tensor, Tensor *output_tensor) {
   bool transformed = false;
   Tensor in;
   in.ShareDataWith(input_tensor);
@@ -89,17 +89,17 @@ void DataTransform(const OpKernelType& expected_kernel_type,
   output_tensor->ShareDataWith(in);
 }
 
-void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor,
-                            Variable* out_var) {
+void SetTensorToVariable(const Variable &in_var, const Tensor &tensor,
+                         Variable *out_var) {
   if (in_var.IsType<LoDTensor>()) {
-    auto& in_lod_tensor = in_var.Get<LoDTensor>();
-    auto* tran_lod_tensor = out_var->GetMutable<LoDTensor>();
+    auto &in_lod_tensor = in_var.Get<LoDTensor>();
+    auto *tran_lod_tensor = out_var->GetMutable<LoDTensor>();
     tran_lod_tensor->set_lod(in_lod_tensor.lod());
     tran_lod_tensor->set_layout(in_lod_tensor.layout());
     tran_lod_tensor->ShareDataWith(tensor);
   } else if (in_var.IsType<SelectedRows>()) {
-    auto& in_selected_rows = in_var.Get<SelectedRows>();
-    auto* trans_selected_rows = out_var->GetMutable<SelectedRows>();
+    auto &in_selected_rows = in_var.Get<SelectedRows>();
+    auto *trans_selected_rows = out_var->GetMutable<SelectedRows>();
     trans_selected_rows->set_height(in_selected_rows.height());
     trans_selected_rows->set_rows(in_selected_rows.rows());
     trans_selected_rows->mutable_value()->ShareDataWith(tensor);
paddle/fluid/framework/data_transform.h

@@ -30,12 +30,15 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
-void DataTransform(const OpKernelType& expected_kernel_type,
-                   const OpKernelType& kernel_type_for_var,
-                   const Tensor& input_tensor, Tensor* out);
-
-void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor,
-                            Variable* out_var);
+void TransformData(const OpKernelType &expected_kernel_type,
+                   const OpKernelType &kernel_type_for_var,
+                   const Tensor &input_tensor, Tensor *out);
+
+/**
+ * Set OutVar from InVar, except the tensor is shared with `tensor`
+ */
+void SetTensorToVariable(const Variable &in_var, const Tensor &tensor,
+                         Variable *out_var);
 
 }  // namespace framework
 }  // namespace paddle
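The new header comment states the contract of SetTensorToVariable: the output variable takes its metadata (LoD, layout, height, rows) from the input variable, while its data buffer is shared with the transformed tensor rather than copied. Below is a toy model of that "share, don't copy" behaviour; ToyTensor, ToyVariable and ToySetTensorToVariable are illustrative stand-ins, not the Paddle types.

    #include <cassert>
    #include <memory>
    #include <vector>

    struct ToyTensor {
      std::shared_ptr<std::vector<float>> buffer;
      // Sharing means pointing at the same buffer, like Tensor::ShareDataWith.
      void ShareDataWith(const ToyTensor& other) { buffer = other.buffer; }
    };

    struct ToyVariable {
      std::vector<size_t> lod;  // metadata that must be preserved
      ToyTensor tensor;
    };

    // Mirrors the documented behaviour for the LoDTensor case:
    // metadata comes from in_var, data is shared with `tensor`.
    void ToySetTensorToVariable(const ToyVariable& in_var, const ToyTensor& tensor,
                                ToyVariable* out_var) {
      out_var->lod = in_var.lod;
      out_var->tensor.ShareDataWith(tensor);
    }

    int main() {
      ToyVariable in_var;
      in_var.lod = {0, 2, 5};
      in_var.tensor.buffer = std::make_shared<std::vector<float>>(5, 1.0f);

      ToyTensor transformed;  // e.g. the result of a data transform
      transformed.buffer = std::make_shared<std::vector<float>>(5, 2.0f);

      ToyVariable out_var;
      ToySetTensorToVariable(in_var, transformed, &out_var);

      assert(out_var.lod == in_var.lod);                    // metadata copied
      assert(out_var.tensor.buffer == transformed.buffer);  // buffer shared
      return 0;
    }
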
paddle/fluid/framework/op_kernel_type.h

@@ -97,7 +97,7 @@ inline bool NeedTransformLayout(const DataLayout& l, const DataLayout& r) {
   return ret;
 }
 
-inline bool TransFromNeeded(const OpKernelType& l, const OpKernelType& r) {
+inline bool NeedTransform(const OpKernelType& l, const OpKernelType& r) {
   return (!platform::places_are_same_class(l.place_, r.place_)) ||
          (l.data_type_ != r.data_type_) ||
          NeedTransformLayout(l.data_layout_, r.data_layout_);
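The renamed predicate decides whether an input needs a data transform before the kernel runs: a transform is required when the place class, the data type, or the layout differs between the variable's kernel type and the expected kernel type. The following compilable sketch reproduces that boolean logic with simplified stand-in types; the real OpKernelType also carries a library type and uses platform::places_are_same_class rather than a plain enum comparison.

    #include <iostream>

    enum class PlaceClass { kCPU, kGPU };
    enum class DataType { kFP32, kFP64 };
    enum class DataLayout { kNCHW, kNHWC, kAnyLayout };

    struct ToyKernelType {
      PlaceClass place;
      DataType data_type;
      DataLayout layout;
    };

    // kAnyLayout is treated as compatible with any concrete layout.
    bool NeedTransformLayout(DataLayout l, DataLayout r) {
      return l != DataLayout::kAnyLayout && r != DataLayout::kAnyLayout && l != r;
    }

    bool ToyNeedTransform(const ToyKernelType& l, const ToyKernelType& r) {
      return (l.place != r.place) || (l.data_type != r.data_type) ||
             NeedTransformLayout(l.layout, r.layout);
    }

    int main() {
      ToyKernelType var_type{PlaceClass::kCPU, DataType::kFP32, DataLayout::kNCHW};
      ToyKernelType expected{PlaceClass::kGPU, DataType::kFP32, DataLayout::kNCHW};
      std::cout << std::boolalpha
                << ToyNeedTransform(var_type, expected) << "\n";  // true: place differs
      expected.place = PlaceClass::kCPU;
      std::cout << ToyNeedTransform(var_type, expected) << "\n";  // false: identical
    }
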
paddle/fluid/framework/operator.cc

@@ -620,8 +620,6 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
                  "There are no kernels which are registered in the %s operator.",
                  type_);
   }
 
-  ExecutionContext ctx(*this, scope, *dev_ctx);
-
   OpKernelMap& kernels = kernels_iter->second;
 
   // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
@@ -631,7 +629,8 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
   //   Do selection
   // }
 
-  auto expected_kernel_key = this->GetExpectedKernelType(ctx);
+  auto expected_kernel_key =
+      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
   VLOG(3) << "expected_kernel_key:" << expected_kernel_key;
 
   auto kernel_iter = kernels.find(expected_kernel_key);
@@ -640,56 +639,34 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
                  KernelTypeToString(expected_kernel_key));
   }
 
-  // do data transform
-  Scope& new_scope = scope.NewScope();
-
-  std::vector<std::string> inplace_vars;
-  for (auto& var_name_item : this->Inputs()) {
-    for (auto& var_name : var_name_item.second) {
-      auto* var = scope.FindVar(var_name);
-      if (var && VarIsTensor(var)) {
-        auto* tensor_in = GetTensorFromVar(var);
-        if (tensor_in->IsInitialized()) {
-          auto kernel_type_for_var = this->GetKernelTypeForVar(
-              var_name_item.first, *tensor_in, expected_kernel_key);
-          if (TransFromNeeded(kernel_type_for_var, expected_kernel_key)) {
-            auto out_var_names = OutputVars(true);
-            if (std::find(out_var_names.begin(), out_var_names.end(),
-                          var_name) != out_var_names.end()) {
-              inplace_vars.push_back(var_name);
-            }
-            VLOG(3) << "Transform Variable " << var_name << " from "
-                    << kernel_type_for_var << " to " << expected_kernel_key;
-            auto* trans_var = new_scope.Var(var_name);
-            std::shared_ptr<Tensor> out(new Tensor);
-            DataTransform(expected_kernel_key, kernel_type_for_var, *tensor_in,
-                          out.get());
-            CopyVariableWithTensor(*var, *(out.get()), trans_var);
-          }
-        }
-      }
-    }
-  }
+  // do data transformScope &transfer_scope;
+  std::vector<std::string> transfered_inplace_vars;
+  auto* transfer_scope =
+      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);
+
+  // exec scope is the scope that kernel actually executed on.
+  const Scope& exec_scope =
+      (transfer_scope == nullptr ? scope : *transfer_scope);
+
+  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
+    dev_ctx = pool.Get(expected_kernel_key.place_);
+  }
 
-  auto* new_dev_ctx = pool.Get(expected_kernel_key.place_);
-  kernel_iter->second->Compute(
-      ExecutionContext(*this, new_scope, *new_dev_ctx));
+  kernel_iter->second->Compute(ExecutionContext(*this, exec_scope, *dev_ctx));
 
-  for (auto& var_name : inplace_vars) {
-    VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
-    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
-    auto* transformed_tensor = GetTensorFromVar(new_scope.FindVar(var_name));
-    original_tensor->ShareDataWith(*transformed_tensor);
-  }
+  if (!transfered_inplace_vars.empty()) {
+    // there is inplace variable has been transfered.
+    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
+  }
 
   /*For profiling/benchmark only*/
   if (FLAGS_benchmark) {
-    new_dev_ctx->Wait();
+    dev_ctx->Wait();
   }
 
   if (FLAGS_check_nan_inf) {
     for (auto& vname : OutputVars(true)) {
-      auto* var = new_scope.FindVar(vname);
+      auto* var = exec_scope.FindVar(vname);
       if (var == nullptr) continue;
       if (var->IsType<framework::LoDTensor>()) {
         CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
@@ -697,6 +674,64 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
     }
   }
 }
 
+void OperatorWithKernel::TransferInplaceVarsBack(
+    const Scope& scope, const std::vector<std::string>& inplace_vars,
+    const Scope& transfer_scope) const {
+  for (auto& var_name : inplace_vars) {
+    VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
+    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
+    auto* transformed_tensor =
+        GetTensorFromVar(transfer_scope.FindVar(var_name));
+    original_tensor->ShareDataWith(*transformed_tensor);
+  }
+}
+
+Scope* OperatorWithKernel::TryTransferData(
+    const Scope& scope, const OpKernelType& expected_kernel_key,
+    std::vector<std::string>* transfered_inplace_vars) const {
+  Scope* new_scope = nullptr;
+  for (auto& var_name_item : Inputs()) {
+    for (auto& var_name : var_name_item.second) {
+      auto* var = scope.FindVar(var_name);
+      // Only tensor can be tranfer to another device.
+      if (var == nullptr || !VarIsTensor(var)) {
+        continue;
+      }
+
+      auto* tensor_in = GetTensorFromVar(var);
+      if (!tensor_in->IsInitialized()) {
+        continue;
+      }
+
+      auto kernel_type_for_var = GetKernelTypeForVar(
+          var_name_item.first, *tensor_in, expected_kernel_key);
+
+      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
+        continue;
+      }
+
+      auto out_var_names = OutputVars(true);
+      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
+          out_var_names.end()) {
+        transfered_inplace_vars->emplace_back(var_name);
+      }
+
+      VLOG(3) << "Transform Variable " << var_name << " from "
+              << kernel_type_for_var << " to " << expected_kernel_key;
+
+      if (new_scope == nullptr) {
+        new_scope = &scope.NewScope();
+      }
+
+      auto* trans_var = new_scope->Var(var_name);
+      Tensor out;
+      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in,
+                    &out);
+      SetTensorToVariable(*var, out, trans_var);
+    }
+  }
+
+  return new_scope;
+}
+
 proto::VarType::Type OperatorWithKernel::IndicateDataType(
     const ExecutionContext& ctx) const {
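The refactor pulls two pieces out of RunImpl: TryTransferData lazily creates a child "transfer" scope only when some input actually needs a transform (returning nullptr otherwise), and TransferInplaceVarsBack shares transformed in-place outputs back to the original scope after the kernel has run. The self-contained sketch below reproduces that control flow; ToyScope, ToyTensor and the device-mismatch test are simplified stand-ins, not the Paddle classes.

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    struct ToyTensor {
      std::string device;
    };

    struct ToyScope {
      std::map<std::string, ToyTensor> vars;
      std::unique_ptr<ToyScope> child;
      ToyScope& NewScope() {
        child.reset(new ToyScope());
        return *child;
      }
    };

    // Returns nullptr when no input needs to be moved to `expected_device`.
    ToyScope* TryTransferData(ToyScope& scope, const std::string& expected_device,
                              std::vector<std::string>* transfered_inplace_vars) {
      ToyScope* new_scope = nullptr;
      for (auto& kv : scope.vars) {
        if (kv.second.device == expected_device) continue;
        if (new_scope == nullptr) new_scope = &scope.NewScope();  // lazy creation
        new_scope->vars[kv.first] = ToyTensor{expected_device};
        transfered_inplace_vars->push_back(kv.first);  // pretend it is also an output
      }
      return new_scope;
    }

    // Mirrors the ShareDataWith step: expose the transformed result in the
    // original scope again.
    void TransferInplaceVarsBack(ToyScope& scope,
                                 const std::vector<std::string>& inplace_vars,
                                 const ToyScope& transfer_scope) {
      for (const auto& name : inplace_vars)
        scope.vars[name] = transfer_scope.vars.at(name);
    }

    int main() {
      ToyScope scope;
      scope.vars["X"] = ToyTensor{"CPU"};

      std::vector<std::string> transfered_inplace_vars;
      ToyScope* transfer_scope =
          TryTransferData(scope, "GPU", &transfered_inplace_vars);

      // The kernel runs on the transfer scope if one was created, otherwise on
      // the original scope, mirroring exec_scope in RunImpl.
      ToyScope& exec_scope = transfer_scope ? *transfer_scope : scope;
      std::cout << "kernel runs on X@" << exec_scope.vars["X"].device << "\n";

      if (!transfered_inplace_vars.empty())
        TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
      std::cout << "original scope now has X@" << scope.vars["X"].device << "\n";
    }
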
paddle/fluid/framework/operator.h

@@ -384,6 +384,20 @@ class OperatorWithKernel : public OperatorBase {
   // same.
   proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
 
   void RunImpl(const Scope& scope, const platform::Place& place) const final;
 
+  /**
+   * Transfer data from scope to a transfered scope. If there is no data need to
+   * be tranfered, it returns nullptr.
+   *
+   * * transfered_inplace_vars is a output vector.
+   */
+  Scope* TryTransferData(
+      const Scope& scope, const OpKernelType& expected_kernel_key,
+      std::vector<std::string>* transfered_inplace_vars) const;
+
+  void TransferInplaceVarsBack(const Scope& scope,
+                               const std::vector<std::string>& inplace_vars,
+                               const Scope& exec_scope) const;
+
 };
 
 extern bool OpSupportGPU(const std::string& op_type);