Crayon鑫 / Paddle
Forked from PaddlePaddle / Paddle (in sync with the upstream project)
Commit aac8303d
Authored Sep 21, 2020 by sandyhouse

update, test=develop

Parent: 6c16858f
Showing 5 changed files with 83 additions and 28 deletions (+83 −28)

paddle/fluid/framework/section_worker.cc            +5  −2
paddle/fluid/operators/collective/c_recv_op.cc      +10 −9
paddle/fluid/operators/collective/c_recv_op.cu.cc   +50 −15
paddle/fluid/operators/collective/c_send_op.cu.cc   +13 −0
python/paddle/fluid/optimizer.py                    +5  −2
paddle/fluid/framework/section_worker.cc

@@ -138,7 +138,7 @@ void SectionWorker::TrainFiles() {
         }
       }
     }
-  } catch (platform::EOFException&) {
+  } catch (platform::EOFException& e) {
     // std::unique_lock<std::mutex> lk(thread_mutex);
     // threads_completed = true;
     VLOG(3) << "thread completed.";
@@ -146,6 +146,8 @@ void SectionWorker::TrainFiles() {
     // thread_condition.notify_all();
     VLOG(3) << "EOF encountered";
     // throw platform::EOFException();
+    // throw e;
+    PADDLE_THROW_EOF();
     break;
   }
 }
@@ -303,7 +305,7 @@ void SectionWorker::TrainFilesWithProfiler() {
               << micro_end.tv_sec * 1e6 + micro_end.tv_usec << "]" << std::endl;
       }
     }
-  } catch (platform::EOFException&) {
+  } catch (platform::EOFException& e) {
     VLOG(3) << "thread completed.";
     VLOG(0) << "EOF encountered";
     VLOG(0) << "============timeline============";
@@ -313,6 +315,7 @@ void SectionWorker::TrainFilesWithProfiler() {
             << ", mean_time: " << op_total_time[i] / op_count[i];
     }
     VLOG(0) << "================================";
+    throw e;
     break;
   }
 }
paddle/fluid/operators/collective/c_recv_op.cc

@@ -40,23 +40,24 @@ class CRecvOp : public framework::OperatorWithKernel {
                           "The size of the output shape must be greater than 0 "
                           "but the value given is %d.",
                           out_shape.size()));
     ctx->SetOutputDim("Out", framework::make_ddim(out_shape));
   }
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
+    VLOG(0) << "wow1";
-    std::string dtype = ctx.Attr<std::string>("dtype");
+    int dtype = ctx.Attr<int>("dtype");
     framework::proto::VarType::Type type;
-    if (dtype == "fp32") {
+    if (dtype == framework::proto::VarType::FP32) {
       type = framework::proto::VarType::FP32;
-    } else if (dtype == "fp64") {
+    } else if (dtype == framework::proto::VarType::FP64) {
       type = framework::proto::VarType::FP64;
-    } else if (dtype == "fp16") {
+    } else if (dtype == framework::proto::VarType::FP16) {
       type = framework::proto::VarType::FP16;
-    } else if (dtype == "int32") {
+    } else if (dtype == framework::proto::VarType::INT32) {
       type = framework::proto::VarType::INT32;
-    } else if (dtype == "int64") {
+    } else if (dtype == framework::proto::VarType::INT64) {
       type = framework::proto::VarType::INT64;
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
@@ -75,9 +76,9 @@ class CRecvOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
         .SetDefault(0);
     AddAttr<int>("peer", "(int default 0) rank id for sender.").SetDefault(0);
-    AddAttr<std::string>("dtype",
-                         "(std::string default fp32) data type of tensor.")
-        .SetDefault("fp32");
+    AddAttr<int>("dtype",
+                 "(std::string default 5(float32)) data type of tensor.")
+        .SetDefault(5);
     AddAttr<std::vector<int>>("out_shape", "shape of the output tensor.")
         .SetDefault(std::vector<int>());
     AddAttr<bool>(
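Note on the change above: the "dtype" attribute of c_recv is switched from a string ("fp32") to the integer value of the framework::proto::VarType enum, with a default of 5 (float32), so the CUDA kernel can cast the attribute straight to a VarType::Type instead of string-matching. A minimal standalone sketch of that idea follows; it is not Paddle code, and the enum values are copied by hand from framework.proto of this period (an assumption):

// Standalone sketch only: enum values assumed to match framework.proto.
#include <cstdio>

enum class VarType : int { INT32 = 2, INT64 = 3, FP16 = 4, FP32 = 5, FP64 = 6 };

int main() {
  int dtype_attr = 5;  // the new default set by AddAttr<int>("dtype", ...).SetDefault(5)
  VarType type = static_cast<VarType>(dtype_attr);  // mirrors framework::proto::VarType::Type(data_type)
  std::printf("attr %d maps to FP32? %s\n", dtype_attr, type == VarType::FP32 ? "yes" : "no");
  return 0;
}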
paddle/fluid/operators/collective/c_recv_op.cu.cc

@@ -25,37 +25,72 @@ namespace operators {
 template <typename T>
 class CRecvOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
 #if defined(PADDLE_WITH_NCCL)
+    VLOG(0) << "here1";
     auto out = ctx.Output<framework::LoDTensor>("Out");
+    VLOG(0) << "here2";
-    auto out_shape = ctx.Attr<std::vector<int>>("out_shape");
-    auto out_dims = paddle::framework::make_ddim(out_shape);
+    // auto out_shape = ctx.Attr<std::vector<int>>("out_shape");
+    // auto out_dims = paddle::framework::make_ddim(out_shape);
+    int data_type = ctx.Attr<int>("dtype");
+    framework::proto::VarType::Type type =
+        framework::proto::VarType::Type(data_type);
+    // if (data_type == framework::proto::VarType::FP32) {
+    //  type = framework::proto::VarType::FP32;
+    //} else if (data_type == framework::proto::VarType::FP64) {
+    //  type = framework::proto::VarType::FP64;
+    //} else if (data_type == framework::proto::VarType::FP16) {
+    //  type = framework::proto::VarType::FP16;
+    //} else if (data_type == framework::proto::VarType::INT32) {
+    //  type = framework::proto::VarType::INT32;
+    //} else if (data_type == framework::proto::VarType::INT64) {
+    //  type = framework::proto::VarType::INT64;
+    //} else {
+    //  PADDLE_THROW(platform::errors::InvalidArgument(
+    //      "Unknown data type %s for c_recv op.", data_type));
+    //}
+    ncclDataType_t dtype = platform::ToNCCLDataType(type);
+    auto out_dims = out->dims();
+    int numel = 0;
+    int* numel_ptr = nullptr;
+    PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc(&numel_ptr, sizeof(int)));
     int rid = ctx.Attr<int>("ring_id");
     auto place = ctx.GetPlace();
     auto comm = platform::NCCLCommContext::Instance().Get(rid, place);
-    out->mutable_data<T>(out_dims, place);
-    VLOG(0) << "out_dims:" << out_dims;
-    ncclDataType_t dtype = platform::ToNCCLDataType(out->type());
-    int numel = out->numel();
-    VLOG(0) << "numel:" << numel;
-    int peer = ctx.Attr<int>("peer");
-    PADDLE_ENFORCE_LT(
-        peer, comm->nranks(),
-        platform::errors::InvalidArgument("The value of peer (%d) you set must "
-                                          "be less than comm->nranks (%d).",
-                                          peer, comm->nranks()));
     cudaStream_t stream = nullptr;
     if (ctx.Attr<bool>("use_calc_stream")) {
       auto dev_ctx = platform::DeviceContextPool::Instance().Get(place);
       stream = static_cast<platform::CUDADeviceContext*>(dev_ctx)->stream();
     } else {
       stream = comm->stream();
     }
+    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclRecv(
+        numel_ptr, 1, ncclInt, peer, comm->comm(), stream));
+    PADDLE_ENFORCE_CUDA_SUCCESS(
+        cudaMemcpy(&numel, numel_ptr, sizeof(int), cudaMemcpyDeviceToHost));
+    VLOG(0) << "numel:" << numel;
+    VLOG(0) << "out_dims:" << out_dims;
+    int rest_numel = 1;
+    for (size_t i = 1; i < out_dims.size(); ++i) {
+      rest_numel = rest_numel * out_dims[i];
+    }
+    out_dims[0] = numel / rest_numel;
+    VLOG(0) << "out_dims:" << out_dims;
+    out->mutable_data<T>(out_dims, place);
+    // ncclDataType_t dtype = platform::ToNCCLDataType(out->type());
+    // numel = out->numel();
+    // VLOG(0) << "numel:" << numel;
+    int peer = ctx.Attr<int>("peer");
+    PADDLE_ENFORCE_LT(
+        peer, comm->nranks(),
+        platform::errors::InvalidArgument("The value of peer (%d) you set must "
+                                          "be less than comm->nranks (%d).",
+                                          peer, comm->nranks()));
+    VLOG(0) << "here3";
     PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclRecv(
         out->data<T>(), numel, dtype, peer, comm->comm(), stream));
paddle/fluid/operators/collective/c_send_op.cu.cc

@@ -49,9 +49,22 @@ class CSendOpCUDAKernel : public framework::OpKernel<T> {
         platform::errors::InvalidArgument("The value of peer (%d) you set must "
                                           "be less than comm->nranks (%d).",
                                           peer, comm->nranks()));
+    int* numel_ptr = nullptr;
+    VLOG(0) << "numel: " << numel;
+    PADDLE_ENFORCE_CUDA_SUCCESS(cudaMalloc(&numel_ptr, sizeof(int)));
+    PADDLE_ENFORCE_CUDA_SUCCESS(
+        cudaMemcpy(numel_ptr, &numel, sizeof(int), cudaMemcpyHostToDevice));
+    // PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupStart());
+    VLOG(0) << "wawa1";
+    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclSend(
+        numel_ptr, 1, ncclInt, peer, comm->comm(), stream));
+    VLOG(0) << "wawa2";
     PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclSend(
         x->data<T>(), numel, dtype, peer, comm->comm(), stream));
+    VLOG(0) << "wawa3";
+    // PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclGroupEnd());
+    VLOG(0) << "wawa4";
     VLOG(3) << "rank " << comm->rank() << " send "
             << framework::product(x->dims()) << " to " << peer;
 #else
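Together with the c_recv_op.cu.cc change above, the sender now ships the tensor's element count as a single ncclInt ahead of the payload, and the receiver uses that count to recover the first output dimension before allocating. A minimal host-side sketch of that shape recovery follows; it is plain C++ rather than Paddle/NCCL code, and the helper name InferFirstDim is made up for illustration:

#include <cstdio>
#include <vector>

// Recover dim 0 from the received element count, trusting only the trailing
// dims of the declared output shape -- the same arithmetic as
// out_dims[0] = numel / rest_numel in the new receive kernel.
int InferFirstDim(int numel, const std::vector<int>& out_shape) {
  int rest_numel = 1;
  for (size_t i = 1; i < out_shape.size(); ++i) rest_numel *= out_shape[i];
  return numel / rest_numel;
}

int main() {
  std::vector<int> out_shape = {-1, 3, 4};  // first dim unknown at graph-build time
  int received_numel = 84;                  // delivered by the extra ncclSend/ncclRecv of one ncclInt
  std::printf("inferred dim0 = %d\n", InferFirstDim(received_numel, out_shape));  // prints 7
  return 0;
}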
python/paddle/fluid/optimizer.py

@@ -3983,6 +3983,7 @@ class PipelineOptimizer(object):
             outputs={'Out': [new_var]},
             attrs={
                 'out_shape': new_var.shape,
+                'dtype': new_var.dtype,
                 self._op_device_key: device,
                 self._op_role_key: self._op_role.Forward,
                 'peer': first_dev_index
@@ -4137,7 +4138,7 @@ class PipelineOptimizer(object):
             attrs={
                 self._op_device_key: prev_device_spec,
                 self._op_role_key: op_role,
-                'peer': prev_device_index
+                'peer': cur_device_index
             })
         extra_index += 1
         block._insert_op(
@@ -4146,9 +4147,10 @@ class PipelineOptimizer(object):
             outputs={'Out': [var]},
             attrs={
                 'out_shape': var.shape,
+                'dtype': var.dtype,
                 self._op_device_key: cur_device_spec,
                 self._op_role_key: op_role,
-                'peer': cur_device_index
+                'peer': prev_device_index
             })
         extra_index += 1
@@ -4324,6 +4326,7 @@ class PipelineOptimizer(object):
             outputs={'Out': [read_block.var(var_name)]},
             attrs={
                 'out_shape': read_block.var(var_name).shape,
+                'dtype': read_block.var(var_name).dtype,
                 self._op_device_key: read_device,
                 # A trick to make the role LRSched to avoid copy every
                 # microbatch