BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle, in sync with the fork source)
Commit 5036cf03 (unverified)
Authored by QI JUN on Dec 29, 2017; committed via GitHub on Dec 29, 2017.
add helper function to get appropriate DeviceContext (#7066)
* add helper function to get appropriate DeviceContext
Parent: a096c58e
Changes: 3 changed files, with 33 additions and 19 deletions (+33 -19)

paddle/framework/data_transform.h        +2   -3
paddle/framework/data_transform_test.cc  +8   -7
paddle/framework/operator.cc             +23  -9
paddle/framework/data_transform.h

@@ -27,8 +27,7 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
-using DataTransformFn =
-    std::function<void(const std::vector<platform::DeviceContext*> ctx,
-                       const Variable& in, Variable* out)>;
+using DataTransformFn = std::function<void(const platform::DeviceContext* ctx,
+                                           const Variable& in, Variable* out)>;
 using KernelTypePair = std::pair<OpKernelType, OpKernelType>;
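With this change a transform routine receives a single platform::DeviceContext* rather than a std::vector of contexts. Below is a minimal sketch of a function matching the new DataTransformFn alias, assuming only the declarations in paddle/framework/data_transform.h; the function name and body are illustrative and not part of this commit.

#include "paddle/framework/data_transform.h"

namespace paddle {
namespace framework {

// Illustrative no-op transform: the new signature hands the function exactly
// one DeviceContext chosen by the caller, instead of a vector of contexts.
void IdentityTrans(const platform::DeviceContext* ctx, const Variable& in,
                   Variable* out) {
  // A real transform would use `ctx` to convert `in` and write the result
  // into `out`; this sketch intentionally does nothing.
  (void)ctx;
  (void)in;
  (void)out;
}

// The free function converts implicitly to the std::function alias.
static DataTransformFn identity_fn = IdentityTrans;

}  // namespace framework
}  // namespace paddle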
paddle/framework/data_transform_test.cc

@@ -54,18 +54,18 @@ auto kernel1 = GenFromBit({0, 0, 0, 1});
 auto kernel2 = GenFromBit({0, 0, 1, 0});
 auto kernel3 = GenFromBit({0, 0, 1, 1});
 
-void TransDataType_t(std::vector<platform::DeviceContext*> ctx,
-                     const Variable& in, Variable* out) {
+void TransDataType_t(const platform::DeviceContext* ctx, const Variable& in,
+                     Variable* out) {
   test_value++;
 }
 
-void TransDataLayout_t(std::vector<platform::DeviceContext*> ctx,
-                       const Variable& in, Variable* out) {
+void TransDataLayout_t(const platform::DeviceContext* ctx, const Variable& in,
+                       Variable* out) {
   test_value--;
 }
 
-void TransLibraryType_t(std::vector<platform::DeviceContext*> ctx,
-                        const Variable& in, Variable* out) {
+void TransLibraryType_t(const platform::DeviceContext* ctx, const Variable& in,
+                        Variable* out) {
   test_value += 2;
 }

@@ -83,7 +83,8 @@ TEST(DataTransform, Register) {
   using namespace paddle::platform;
 
   auto& instance = DataTransformFnMap::Instance();
-  std::vector<DeviceContext*> ctx;
   ASSERT_EQ(instance.Map().size(), 3UL);
+
+  DeviceContext* ctx = nullptr;
   paddle::framework::Variable in;
   paddle::framework::Variable out;
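The test now passes a single (possibly null) DeviceContext pointer to the registered stubs instead of a vector. As a rough, illustrative sketch of how such a lookup and call might look with the new signature, using only DataTransformFnMap::Instance() and GetNullable, which both appear in this diff; the kernel pair below is hypothetical, since the registrations themselves are collapsed in this view:

// Illustrative only (not part of the diff): fetch a registered transform and
// invoke it with the single DeviceContext* declared in the test.
const DataTransformFn* fn =
    DataTransformFnMap::Instance().GetNullable(std::make_pair(kernel1, kernel2));
if (fn != nullptr) {
  (*fn)(ctx, in, &out);  // calls the registered stub, which updates test_value
}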
paddle/framework/operator.cc

@@ -384,6 +384,24 @@ class RuntimeInferShapeContext : public InferShapeContext {
   const Scope& scope_;
 };
 
+const platform::DeviceContext* GetDeviceContext(
+    framework::KernelTypePair& kernel_pair) {
+  auto& actual_kernel_key = kernel_pair.first;
+  auto& expected_kernel_key = kernel_pair.second;
+  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
+
+  if (platform::is_gpu_place(actual_kernel_key.place_) &&
+      platform::is_cpu_place(expected_kernel_key.place_)) {
+    return pool.Get(actual_kernel_key.place_);
+  } else if (platform::is_cpu_place(actual_kernel_key.place_) &&
+             platform::is_gpu_place(expected_kernel_key.place_)) {
+    return pool.Get(expected_kernel_key.place_);
+  } else {
+    PADDLE_THROW(
+        "Currently, model parallelism is only supported between CPU and CUDA");
+  }
+}
+
 void OperatorWithKernel::Run(const Scope& scope,
                              const platform::Place& place) const {
   RuntimeInferShapeContext infer_shape_ctx(*this, scope);

@@ -418,9 +436,9 @@ void OperatorWithKernel::Run(const Scope& scope,
         "CPU and other devices. For example, multi-GPU model "
         "parallelism will failed.");
   } else {
+    auto kernel_pair = std::make_pair(actual_kernel_key, expected_kernel_key);
     const DataTransformFn* trans_fun =
-        DataTransformFnMap::Instance().GetNullable(
-            std::make_pair(actual_kernel_key, expected_kernel_key));
+        DataTransformFnMap::Instance().GetNullable(kernel_pair);
     if (trans_fun) {
       auto input_vars = this->InputVars();
       // TODO(qijun) filter the input vars that do not need to be transformed

@@ -437,22 +455,18 @@ void OperatorWithKernel::Run(const Scope& scope,
       }
 
      if (!need_trans.empty()) {
-        // TODO(qijun) get appropriate DeviceContext from DeviceContext pool
-        platform::DeviceContext* trans_dev_ctx = nullptr;
-        std::vector<platform::DeviceContext*> trans_dev_ctx_vec{trans_dev_ctx};
+        auto trans_dev_ctx = GetDeviceContext(kernel_pair);
 
        // Wait for transform starting
        dev_ctx->Wait();
 
        for (auto var_name : need_trans) {
-          (*trans_fun)(trans_dev_ctx_vec, *(scope.FindVar(var_name)),
+          (*trans_fun)(trans_dev_ctx, *(scope.FindVar(var_name)),
                       scope.FindVar(var_name + framework::KernelTypeToString(
                                                    expected_kernel_key)));
        }
        // Wait for data transform finishing
-        for (auto ctx : trans_dev_ctx_vec) {
-          ctx->Wait();
-        }
+        trans_dev_ctx->Wait();
      }
    }
  }
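The rule encoded in GetDeviceContext is that, for the two supported directions (CUDA kernel to CPU kernel, or CPU kernel to CUDA kernel), the GPU-side context drives the transform: actual place when going GPU to CPU, expected place when going CPU to GPU; any other combination throws. The following standalone sketch mirrors that branch structure with simplified stand-in types instead of Paddle's platform::Place and DeviceContextPool; everything in it is illustrative, not code from this commit.

#include <stdexcept>

// Simplified stand-ins for platform::Place and the kernel keys.
enum class Place { kCPU, kCUDA };
struct KernelKey { Place place; };

// Mirrors the branch structure of GetDeviceContext: return the place whose
// DeviceContext should run the data transform, or throw if unsupported.
Place TransformPlace(const KernelKey& actual, const KernelKey& expected) {
  if (actual.place == Place::kCUDA && expected.place == Place::kCPU) {
    return actual.place;    // GPU -> CPU: use the actual (GPU) place's context
  } else if (actual.place == Place::kCPU && expected.place == Place::kCUDA) {
    return expected.place;  // CPU -> GPU: use the expected (GPU) place's context
  }
  throw std::runtime_error(
      "Currently, model parallelism is only supported between CPU and CUDA");
}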