BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 6a1957e7 (unverified)
Authored May 01, 2021 by Baibaifan; committed by GitHub on May 01, 2021

slove develop bugs (#32560) (#32684)

Parent: 2c1ed9b8
Showing 4 changed files with 6 additions and 6 deletions (+6 −6)
paddle/fluid/operators/collective/c_sync_comm_stream_op.cc  (+0 −2)
paddle/fluid/pybind/ascend_wrapper_py.cc  (+2 −0)
python/paddle/distributed/fleet/launch.py  (+2 −2)
python/paddle/distributed/fleet/launch_utils.py  (+2 −2)
paddle/fluid/operators/collective/c_sync_comm_stream_op.cc
@@ -63,7 +63,6 @@ class CSyncCommStreamCudaKernel : public framework::OpKernel<T> {
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto place = ctx.GetPlace();
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    int ring_id = ctx.Attr<int>("ring_id");
    auto stream =
        platform::NCCLCommContext::Instance().Get(ring_id, place)->stream();

@@ -75,7 +74,6 @@ class CSyncCommStreamCudaKernel : public framework::OpKernel<T> {
#endif
#elif defined(PADDLE_WITH_ASCEND_CL)
    auto place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(is_npu_place(place), true,
                      platform::errors::PreconditionNotMet(
                          "Sync stream op can run on npu place only for now."));
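The hunks above touch the CUDA kernel of the c_sync_comm_stream op, which waits on the communication stream selected by the ring_id attribute. As a minimal illustrative sketch only (not code from this commit), the snippet below shows how such an op is typically appended to a fluid Program block from Python; the helper name and the 'X'/'Out' slot names follow the usual collective-op convention and are assumptions here.

    import paddle.fluid as fluid

    def append_sync_comm_stream(block, var, ring_id=0):
        # Block until the collectives queued on communicator ring `ring_id`
        # have finished; 'X'/'Out' slot names are assumed conventions.
        block.append_op(
            type='c_sync_comm_stream',
            inputs={'X': [var]},
            outputs={'Out': [var]},
            attrs={'ring_id': ring_id})

    block = fluid.default_main_program().global_block()
    data = block.create_var(name='data', shape=[1], dtype='float32')
    append_sync_comm_stream(block, data, ring_id=0)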
paddle/fluid/pybind/ascend_wrapper_py.cc
@@ -108,12 +108,14 @@ enum AttrType {
   AT_NAMEATTR
 };

+#ifdef PADDLE_WITH_ASCEND
 void BindAscendDevice(py::module *m) {
   py::class_<platform::ascend::NPUDevice>(*m, "NPUDevice")
       .def_static("get_device_count",
                   static_cast<int (*)()>(
                       &platform::ascend::NPUDevice::GetDeviceCount));
 }
+#endif

 void BindAscendGraph(py::module *m) {
   m->def("ge_initialize", &ge_initialize, "GEInitialize");
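With BindAscendDevice now wrapped in #ifdef PADDLE_WITH_ASCEND, the core.NPUDevice class is only exposed in Ascend-enabled builds. Below is a minimal sketch (not part of this commit) of the portable way to query NPU availability, which is the same pattern the launch scripts switch to in the next two files.

    # Portable NPU check: avoids the optionally-bound NPUDevice class, so it
    # also works on builds without Ascend support.
    import paddle.fluid as fluid

    def npu_count():
        core = fluid.core
        if core.is_compiled_with_npu():
            return core.get_npu_device_count()
        return 0

    print("visible NPUs:", npu_count())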
python/paddle/distributed/fleet/launch.py
@@ -325,8 +325,8 @@ def which_distributed_mode(args):
     if fluid.core.is_compiled_with_cuda():
         accelerators = fluid.core.get_cuda_device_count()
-    elif fluid.core.is_compiled_with_ascend():
-        accelerators = fluid.core.NPUDevice.get_device_count()
+    elif fluid.core.is_compiled_with_npu():
+        accelerators = fluid.core.get_npu_device_count()
     elif fluid.core.is_compiled_with_xpu():
         accelerators = fluid.core.get_xpu_device_count()
     else:
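The fix replaces the Ascend-specific calls (is_compiled_with_ascend(), NPUDevice.get_device_count()) with the generic is_compiled_with_npu() / get_npu_device_count() pair. Condensed into a standalone sketch (argument handling and the surrounding launch logic omitted), the detection chain after the change reads:

    # Condensed sketch of the accelerator-detection order used by
    # which_distributed_mode() after this commit; only the counting logic
    # is reproduced here.
    import paddle.fluid as fluid

    def count_accelerators():
        core = fluid.core
        if core.is_compiled_with_cuda():
            return core.get_cuda_device_count()
        elif core.is_compiled_with_npu():       # was is_compiled_with_ascend()
            return core.get_npu_device_count()  # was NPUDevice.get_device_count()
        elif core.is_compiled_with_xpu():
            return core.get_xpu_device_count()
        return 0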
python/paddle/distributed/fleet/launch_utils.py
@@ -653,8 +653,8 @@ def get_xpus(xpus):

 def get_device_mode():
-    if fluid.core.is_compiled_with_ascend() and \
-            fluid.core.NPUDevice.get_device_count() > 0:
+    if fluid.core.is_compiled_with_npu() and \
+            fluid.core.get_npu_device_count() > 0:
         print("launch train in ascend npu mode!")
         return DeviceMode.ASCEND_NPU
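get_device_mode() receives the same substitution. Here is a self-contained sketch of the corrected NPU branch, with DeviceMode replaced by an illustrative stand-in since the real constants are defined elsewhere in launch_utils.py:

    # Sketch of the corrected NPU branch in get_device_mode(); DeviceMode is
    # a stand-in class with illustrative values, not Paddle's definition.
    import paddle.fluid as fluid

    class DeviceMode:
        CPU = 0         # illustrative value
        ASCEND_NPU = 1  # illustrative value

    def get_device_mode():
        if fluid.core.is_compiled_with_npu() and \
                fluid.core.get_npu_device_count() > 0:
            print("launch train in ascend npu mode!")
            return DeviceMode.ASCEND_NPU
        # the real function goes on to check other accelerator types before
        # falling back to CPU
        return DeviceMode.CPU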