Repository: Crayon鑫 / Paddle (fork of PaddlePaddle / Paddle)

Commit 6e3856d3 (unverified)

fix xpu dygraph place (#30868)

Authored by WangXi on Feb 04, 2021; committed via GitHub on Feb 04, 2021.
Parent: 35c5b23f
Showing 5 changed files with 55 additions and 55 deletions (+55 -55).
paddle/fluid/imperative/tests/test_tracer.cc   +24   -4
paddle/fluid/imperative/tracer.cc              +20   -0
paddle/fluid/imperative/tracer.h               +1    -1
paddle/fluid/pybind/imperative.cc              +0    -16
paddle/fluid/pybind/tensor_py.h                +10   -34
paddle/fluid/imperative/tests/test_tracer.cc  (+24, -4)

@@ -305,10 +305,30 @@ TEST(test_tracer, test_expected_place) {
   // default expected place is CPUPlace
   imperative::Tracer tracer;
   ASSERT_EQ(platform::is_cpu_place(tracer.ExpectedPlace()), true);
-  // set to CUDAPlace
-  platform::CUDAPlace gpu_place(0);
-  tracer.SetExpectedPlace(gpu_place);
-  ASSERT_EQ(platform::is_gpu_place(tracer.ExpectedPlace()), true);
+  {
+#ifdef PADDLE_WITH_CUDA
+    // set to CUDAPlace
+    platform::CUDAPlace gpu_place(0);
+    tracer.SetExpectedPlace(gpu_place);
+    ASSERT_EQ(platform::is_gpu_place(tracer.ExpectedPlace()), true);
+
+    // assert throw
+    platform::XPUPlace xpu_place(0);
+    ASSERT_THROW(tracer.SetExpectedPlace(xpu_place), platform::EnforceNotMet);
+#endif
+  }
+  {
+#ifdef PADDLE_WITH_XPU
+    // set to XPUPlace
+    platform::XPUPlace xpu_place(0);
+    tracer.SetExpectedPlace(xpu_place);
+    ASSERT_EQ(platform::is_xpu_place(tracer.ExpectedPlace()), true);
+
+    // assert throw
+    platform::CUDAPlace cuda_place(0);
+    ASSERT_THROW(tracer.SetExpectedPlace(cuda_place), platform::EnforceNotMet);
+#endif
+  }
 }

 TEST(test_tracer, test_var_without_grad_var) {
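The updated test wraps each backend block in #ifdef so it only runs in builds that actually contain that backend, and it additionally asserts that SetExpectedPlace throws when given a place whose backend was not compiled in. Below is a minimal, self-contained sketch of that ASSERT_THROW pattern using gtest and a hypothetical FakeTracer; the names are illustrative stand-ins, not Paddle code.

// Sketch only: a fake tracer that pretends only the "cpu" backend is built in.
#include <gtest/gtest.h>

#include <stdexcept>
#include <string>

struct FakeTracer {
  // Reject any place whose backend is "not compiled in" (here: everything but cpu).
  void SetExpectedPlace(const std::string& place) {
    if (place != "cpu") {
      throw std::runtime_error("backend not compiled in");
    }
    place_ = place;
  }
  std::string place_ = "cpu";
};

// Link against gtest_main (or supply your own main) to run this test.
TEST(sketch, set_place_throws_for_missing_backend) {
  FakeTracer tracer;
  tracer.SetExpectedPlace("cpu");               // supported backend: no throw
  ASSERT_THROW(tracer.SetExpectedPlace("xpu"),  // missing backend: must throw
               std::runtime_error);
}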
paddle/fluid/imperative/tracer.cc  (+20, -0)

@@ -198,6 +198,26 @@ void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
                      inplace_map);
 }

+void Tracer::SetExpectedPlace(platform::Place place) {
+  // NOTE(wangxi): set device id before launch device kernel
+  if (platform::is_gpu_place(place)) {
+#ifdef PADDLE_WITH_CUDA
+    platform::SetDeviceId(BOOST_GET_CONST(platform::CUDAPlace, place).device);
+#else
+    PADDLE_THROW(platform::errors::PreconditionNotMet(
+        "PaddlePaddle should compile with GPU if use CUDAPlace."));
+#endif
+  } else if (platform::is_xpu_place(place)) {
+#ifdef PADDLE_WITH_XPU
+    platform::SetXPUDeviceId(BOOST_GET_CONST(platform::XPUPlace, place).device);
+#else
+    PADDLE_THROW(platform::errors::PreconditionNotMet(
+        "PaddlePaddle should compile with XPU if use XPUPlace."));
+#endif
+  }
+  expected_place_ = place;
+}
+
 bool Tracer::ComputeRequiredGrad(const NameVarBaseMap& ins,
                                  const NameVarBaseMap& outs,
                                  bool trace_backward) {
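SetExpectedPlace now does the device-id bookkeeping itself: it inspects the place, calls platform::SetDeviceId or platform::SetXPUDeviceId before any kernel launch, and throws if the binary was built without that backend. The expression BOOST_GET_CONST(platform::CUDAPlace, place).device extracts the concrete place type from the variant-like platform::Place. The following stand-alone analogy of that extraction uses std::variant instead of Paddle's types; every name in it is illustrative, not Paddle's API.

// Analogy sketch: a tagged-union "Place" and extracting the concrete alternative.
#include <iostream>
#include <variant>

struct CPUPlace {};
struct CUDAPlace { int device; };
struct XPUPlace { int device; };

using Place = std::variant<CPUPlace, CUDAPlace, XPUPlace>;

int main() {
  Place place = CUDAPlace{1};
  if (std::holds_alternative<CUDAPlace>(place)) {        // cf. is_gpu_place(place)
    int dev = std::get<CUDAPlace>(place).device;         // cf. BOOST_GET_CONST(...).device
    std::cout << "set CUDA device " << dev << " before launching kernels\n";
  } else if (std::holds_alternative<XPUPlace>(place)) {  // cf. is_xpu_place(place)
    int dev = std::get<XPUPlace>(place).device;
    std::cout << "set XPU device " << dev << " before launching kernels\n";
  }
  return 0;
}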
paddle/fluid/imperative/tracer.h  (+1, -1)

@@ -99,7 +99,7 @@ class Tracer {
   platform::Place ExpectedPlace() const { return expected_place_; }

-  void SetExpectedPlace(platform::Place place) { expected_place_ = place; }
+  void SetExpectedPlace(platform::Place place);

   bool HasGrad() const { return has_grad_; }
paddle/fluid/pybind/imperative.cc  (+0, -16)

@@ -1207,15 +1207,6 @@ void BindImperative(py::module *m_ptr) {
             if (py::isinstance<platform::CUDAPlace>(obj)) {
               auto p = obj.cast<platform::CUDAPlace *>();
               self.SetExpectedPlace(*p);
-              // NOTE(zhiqiu): When switching cuda place, we need to set the
-              // cuda device id.
-              // Otherwise, some cuda API may be launched at other cuda place,
-              // which may cost hundreds of MB of GPU memory due to the cuda
-              // lib.
-#ifdef PADDLE_WITH_CUDA
-              platform::SetDeviceId(p->device);
-#endif
               VLOG(4) << "Tracer(" << &self << ")"
                       << " set expected place " << *p;
             } else if (py::isinstance<platform::XPUPlace>(obj)) {

@@ -1236,13 +1227,6 @@ void BindImperative(py::module *m_ptr) {
             } else if (py::isinstance<platform::Place>(obj)) {
               auto p = obj.cast<platform::Place *>();
               self.SetExpectedPlace(*p);
-              if (platform::is_gpu_place(*p)) {
-                // NOTE(zhiqu): same as obj is CUDAPlace.
-#ifdef PADDLE_WITH_CUDA
-                platform::SetDeviceId(
-                    BOOST_GET_CONST(platform::CUDAPlace, *p).device);
-#endif
-              }
               VLOG(4) << "Tracer(" << &self << ")"
                       << " set expected place " << *p;
             } else {
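With the device id now set inside Tracer::SetExpectedPlace, the Python binding no longer repeats platform::SetDeviceId for each place type; each branch only casts the Python object and forwards it to the single setter. A minimal pybind11 sketch of that "forward to one setter" shape is given below; Place, Tracer, and the module name are hypothetical stand-ins, not Paddle's binding code.

// Sketch only (assumes pybind11): a property setter that delegates to one C++ setter.
#include <pybind11/pybind11.h>

namespace py = pybind11;

struct Place { int device_id = 0; };

struct Tracer {
  void SetExpectedPlace(const Place &p) {
    // In Paddle, the device id would be set here, once, for every caller.
    place_ = p;
  }
  Place place_;
};

PYBIND11_MODULE(place_sketch, m) {
  py::class_<Place>(m, "Place").def(py::init<>());
  py::class_<Tracer>(m, "Tracer")
      .def(py::init<>())
      .def_property(
          "_expected_place",
          [](const Tracer &self) { return self.place_; },
          [](Tracer &self, const Place &p) { self.SetExpectedPlace(p); });
}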
paddle/fluid/pybind/tensor_py.h  (+10, -34)

@@ -259,38 +259,6 @@ void TensorSetElement(framework::Tensor *self, size_t offset, T elem) {
   }
 }

-// NOTE(wangxi): When copying data to the accelerator card,
-// we need set_device(dev_id) first.
-template <typename P>
-static int GetDeviceId(const P &place) {
-  // for CPUPlace and CUDAPinnedPlace.
-  PADDLE_THROW(platform::errors::PermissionDenied(
-      "Paddle can't Get CPUPlace or CUDAPinnedPlace Device Id."));
-}
-
-template <>
-int GetDeviceId<platform::CUDAPlace>(const platform::CUDAPlace &place) {
-  return place.GetDeviceId();
-}
-
-template <>
-int GetDeviceId<platform::XPUPlace>(const platform::XPUPlace &place) {
-  return place.GetDeviceId();
-}
-
-// NOTE(wangxi16): Used by VarBase __setitem__
-template <>
-int GetDeviceId<platform::Place>(const platform::Place &place) {
-  if (paddle::platform::is_gpu_place(place)) {
-    return GetDeviceId(BOOST_GET_CONST(platform::CUDAPlace, place));
-  } else if (paddle::platform::is_xpu_place(place)) {
-    return GetDeviceId(BOOST_GET_CONST(platform::XPUPlace, place));
-  }
-  // for CPUPlace and CUDAPinnedPlace.
-  PADDLE_THROW(platform::errors::PermissionDenied(
-      "Paddle can't Get CPUPlace or CUDAPinnedPlace Device Id."));
-}
-
 template <typename T, typename P>
 void SetTensorFromPyArrayT(
     framework::Tensor *self,
@@ -314,7 +282,11 @@ void SetTensorFromPyArrayT(
     }
   } else if (paddle::platform::is_xpu_place(place)) {
 #ifdef PADDLE_WITH_XPU
-    platform::XPUDeviceGuard guard(GetDeviceId(place));
+    // NOTE(wangxi): When copying data to the accelerator card,
+    // we need set_device(dev_id) first.
+    platform::Place tmp_place = place;
+    platform::XPUDeviceGuard guard(
+        BOOST_GET_CONST(platform::XPUPlace, tmp_place).device);
     auto dst = self->mutable_data<T>(place);
     xpu_memcpy(dst, array.data(), array.nbytes(),
                XPUMemcpyKind::XPU_HOST_TO_DEVICE);
@@ -326,7 +298,11 @@ void SetTensorFromPyArrayT(
   } else {
 #ifdef PADDLE_WITH_CUDA
     if (paddle::platform::is_gpu_place(place)) {
-      platform::CUDADeviceGuard guard(GetDeviceId(place));
+      // NOTE(wangxi): When copying data to the accelerator card,
+      // we need set_device(dev_id) first.
+      platform::Place tmp_place = place;
+      platform::CUDADeviceGuard guard(
+          BOOST_GET_CONST(platform::CUDAPlace, tmp_place).device);
       auto dst = self->mutable_data<T>(place);
       paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
                                       cudaMemcpyHostToDevice);
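Both copy paths now build the device guard from the concrete place's device field (via a temporary platform::Place plus BOOST_GET_CONST) instead of the removed GetDeviceId helpers. The guard itself is the usual RAII idiom: switch to the tensor's device for the duration of the host-to-device copy, then restore the previous device when the guard leaves scope. A minimal sketch of that idiom follows, with a fake runtime; nothing below is Paddle's or CUDA's real API.

// RAII sketch of a device guard around a host-to-device copy.
#include <iostream>

namespace fake_runtime {                  // hypothetical stand-in for a device runtime
int g_current_device = 0;                 // pretend "current device" of this thread
int GetDevice() { return g_current_device; }
void SetDevice(int id) { g_current_device = id; }
}  // namespace fake_runtime

class ScopedDeviceGuard {
 public:
  explicit ScopedDeviceGuard(int target) : prev_(fake_runtime::GetDevice()) {
    fake_runtime::SetDevice(target);      // switch to the tensor's device
  }
  ~ScopedDeviceGuard() { fake_runtime::SetDevice(prev_); }  // restore on scope exit

 private:
  int prev_;
};

int main() {
  {
    ScopedDeviceGuard guard(/*target=*/1);
    // ... the host-to-device memcpy would run here, bound to device 1 ...
    std::cout << "copy runs on device " << fake_runtime::GetDevice() << "\n";
  }
  std::cout << "restored to device " << fake_runtime::GetDevice() << "\n";
  return 0;
}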