Commit 2573ac14
Authored Oct 25, 2017 by Dong Zhihong

    "remove python side test case to another PR."

Parent: 4e165f4e
Showing 3 changed files with 121 additions and 320 deletions (+121, -320):
paddle/operators/nccl_op_test.cu                             +121  -198
python/paddle/v2/framework/tests/test_nccl_allreduce_op.py     +0   -97
python/paddle/v2/framework/tests/test_nccl_reduce_op.py        +0   -25
paddle/operators/nccl_op_test.cu

@@ -126,201 +126,42 @@ class NCCLTester : public ::testing::Test {
   std::mutex mu;
 };
 
-// // ncclInitOp with desc
-// TEST(NCCL, ncclInitOp) {
-//   std::unique_ptr<f::OpDescBind> op_desc(new f::OpDescBind);
-//   op_desc->SetType("ncclInit");
-//   op_desc->SetOutput("Communicator", {"x1"});
-//   op_desc->SetAttr("gpus", {gpu_list});
-//   f::Scope g_scope;
-//   std::unique_ptr<p::DeviceContext> ctx(new
-//   p::CPUDeviceContext(p::CPUPlace()));
-//   auto *var = g_scope.Var("x1");
-//   var->GetMutable<p::Communicator>();
-//   auto op = f::OpRegistry::CreateOp(*op_desc);
-//   VLOG(1) << "invoke NCCLInitOp.";
-//   op->Run(g_scope, *ctx.get());
-//   VLOG(1) << "NCCLInitOp finished.";
-// }
-
-// // ncclAllReduceOp with desc
-// TEST_F(NCCLTester, ncclAllReduceOp) {
-//   std::unique_ptr<f::OpDescBind> op2(new f::OpDescBind);
-//   op2->SetType("ncclAllReduce");
-//   op2->SetInput("X", {"st"});
-//   op2->SetInput("Communicator", {"comm"});
-//   op2->SetOutput("Out", {"rt"});
-//   std::vector<f::Scope *> dev_scopes;
-//   std::vector<std::thread> ths;
-//   for (size_t i = 0; i < gpu_list.size(); ++i) {
-//     dev_scopes.emplace_back(&g_scope.NewScope());
-//     std::thread th(&NCCLTester::PerThreadProgram<float>, this, gpu_list[i],
-//                    *op2.get(), dev_scopes[i]);
-//     ths.emplace_back(std::move(th));
-//   }
-//   for (size_t i = 0; i < gpu_list.size(); ++i) {
-//     ths[i].join();
-//   }
-//   // check results
-//   float result = std::accumulate(gpu_list.begin(), gpu_list.end(), 0);
-//   for (size_t i = 0; i < dev_scopes.size(); ++i) {
-//     p::CPUPlace cpu_place;
-//     p::GPUPlace gpu_place(gpu_list[i]);
-//     auto &recv_tensor = dev_scopes[i]->FindVar("rt")->Get<f::LoDTensor>();
-//     auto *rt = recv_tensor.data<float>();
-//     auto *result_tensor =
-//     dev_scopes[i]->Var("ct")->GetMutable<f::LoDTensor>();
-//     result_tensor->Resize(kDims);
-//     auto *ct = result_tensor->mutable_data<float>(cpu_place);
-//     paddle::memory::Copy(
-//         cpu_place, ct, p::GPUPlace(gpu_list[i]), rt,
-//         recv_tensor.numel() * sizeof(float),
-//         static_cast<p::CUDADeviceContext *>(dev_ctxs[i])->stream());
-//     for (size_t j = 0; j < f::product(kDims); ++j) {
-//       ASSERT_NEAR(ct[j], result, 1e-5);
-//     }
-//   }
-// }
-
-// // ncclAReduceOp with desc
-// TEST_F(NCCLTester, ncclReduceOp) {
-//   std::unique_ptr<f::OpDescBind> op2(new f::OpDescBind);
-//   const int kRoot = 0;
-//   op2->SetType("ncclReduce");
-//   op2->SetInput("X", {"st"});
-//   op2->SetInput("Communicator", {"comm"});
-//   op2->SetOutput("Out", {"rt"});
-//   op2->SetAttr("root", {kRoot});
-//   std::vector<f::Scope *> dev_scopes;
-//   std::vector<std::thread> ths;
-//   for (size_t i = 0; i < gpu_list.size(); ++i) {
-//     dev_scopes.emplace_back(&g_scope.NewScope());
-//     std::thread th(&NCCLTester::PerThreadProgram<float>, this, gpu_list[i],
-//                    *op2.get(), dev_scopes[i]);
-//     ths.emplace_back(std::move(th));
-//   }
-//   for (size_t i = 0; i < gpu_list.size(); ++i) {
-//     ths[i].join();
-//   }
-//   // check results on
-//   float result = std::accumulate(gpu_list.begin(), gpu_list.end(), 0);
-//   p::CPUPlace cpu_place;
-//   p::GPUPlace gpu_place(gpu_list[kRoot]);
-//   auto &recv_tensor = dev_scopes[kRoot]->FindVar("rt")->Get<f::LoDTensor>();
-//   auto *rt = recv_tensor.data<float>();
-//   auto *result_tensor =
-//   dev_scopes[kRoot]->Var("ct")->GetMutable<f::LoDTensor>();
-//   result_tensor->Resize(kDims);
-//   auto *ct = result_tensor->mutable_data<float>(cpu_place);
-//   paddle::memory::Copy(
-//       cpu_place, ct, p::GPUPlace(gpu_list[kRoot]), rt,
-//       recv_tensor.numel() * sizeof(float),
-//       static_cast<p::CUDADeviceContext *>(dev_ctxs[kRoot])->stream());
-//   for (int j = 0; j < f::product(kDims); ++j) {
-//     ASSERT_NEAR(ct[j], result, 1e-5);
-//   }
-// }
-
-// // // ncclBcastOp with desc
-// TEST_F(NCCLTester, ncclBcastOp) {
-//   std::unique_ptr<f::OpDescBind> op2(new f::OpDescBind);
-//   const int kRoot = 5;
-//   op2->SetType("ncclBcast");
-//   op2->SetInput("X", {"st"});
-//   op2->SetInput("Communicator", {"comm"});
-//   op2->SetOutput("Out", {"rt"});
-//   op2->SetAttr("root", {kRoot});
-//   std::vector<f::Scope *> dev_scopes;
-//   std::vector<std::thread> ths;
-//   for (size_t i = 0; i < gpu_list.size(); ++i) {
-//     dev_scopes.emplace_back(&g_scope.NewScope());
-//     std::thread th(&NCCLTester::PerThreadProgram<float>, this, gpu_list[i],
-//                    *op2.get(), dev_scopes[i]);
-//     ths.emplace_back(std::move(th));
-//   }
-//   for (size_t i = 0; i < gpu_list.size(); ++i) {
-//     ths[i].join();
-//   }
-//   const int idx = 1;
-//   // check results on
-//   float result = kRoot;
-//   p::CPUPlace cpu_place;
-//   p::GPUPlace gpu_place(gpu_list[idx]);
-//   auto &recv_tensor = dev_scopes[idx]->FindVar("rt")->Get<f::LoDTensor>();
-//   auto *rt = recv_tensor.data<float>();
-//   auto *result_tensor =
-//   dev_scopes[idx]->Var("ct")->GetMutable<f::LoDTensor>();
-//   result_tensor->Resize(kDims);
-//   auto *ct = result_tensor->mutable_data<float>(cpu_place);
-//   paddle::memory::Copy(
-//       cpu_place, ct, p::GPUPlace(gpu_list[idx]), rt,
-//       recv_tensor.numel() * sizeof(float),
-//       static_cast<p::CUDADeviceContext *>(dev_ctxs[idx])->stream());
-//   for (size_t j = 0; j < f::product(kDims); ++j) {
-//     ASSERT_NEAR(ct[j], result, 1e-5);
-//   }
-// }
-
-// joint ncclBcastOp and ncclReduceOp
-TEST_F(NCCLTester, MultipleOp) {
-  const int kRoot = 0;
-  std::unique_ptr<f::OpDescBind> op1(new f::OpDescBind);
-  op1->SetType("ncclReduce");
-  op1->SetInput("X", {"st"});
-  op1->SetInput("Communicator", {"comm"});
-  op1->SetOutput("Out", {"rt"});
-  op1->SetAttr("root", {kRoot});
+// ncclInitOp with desc
+TEST(NCCL, ncclInitOp) {
+  std::unique_ptr<f::OpDescBind> op_desc(new f::OpDescBind);
+  op_desc->SetType("ncclInit");
+  op_desc->SetOutput("Communicator", {"x1"});
+  op_desc->SetAttr("gpus", {gpu_list});
+
+  f::Scope g_scope;
+  std::unique_ptr<p::DeviceContext> ctx(new p::CPUDeviceContext(p::CPUPlace()));
+
+  auto *var = g_scope.Var("x1");
+  var->GetMutable<p::Communicator>();
+
+  auto op = f::OpRegistry::CreateOp(*op_desc);
+  VLOG(1) << "invoke NCCLInitOp.";
+  op->Run(g_scope, *ctx.get());
+  VLOG(1) << "NCCLInitOp finished.";
+}
+
+// ncclAllReduceOp with desc
+TEST_F(NCCLTester, ncclAllReduceOp) {
   std::unique_ptr<f::OpDescBind> op2(new f::OpDescBind);
-  op2->SetType("ncclBcast");
-  op2->SetInput("X", {"rt"});
+  op2->SetType("ncclAllReduce");
+  op2->SetInput("X", {"st"});
   op2->SetInput("Communicator", {"comm"});
-  op2->SetOutput("Out", {"out"});
-  op2->SetAttr("root", {kRoot});
+  op2->SetOutput("Out", {"rt"});
 
   std::vector<f::Scope *> dev_scopes;
-  // for (size_t i = 0; i < dev_scopes.size(); ++i) {
-  //   dev_scopes[i]->Var("out")->GetMutable<f::LoDTensor>();
-  // }
 
   std::vector<std::thread> ths;
 
-  // run Reduce
   for (size_t i = 0; i < gpu_list.size(); ++i) {
     dev_scopes.emplace_back(&g_scope.NewScope());
     std::thread th(&NCCLTester::PerThreadProgram<float>, this, gpu_list[i],
-                   *op1.get(), dev_scopes[i]);
+                   *op2.get(), dev_scopes[i]);
     ths.emplace_back(std::move(th));
   }
@@ -328,11 +169,46 @@ TEST_F(NCCLTester, MultipleOp) {
     ths[i].join();
   }
-  ths.clear();
+
+  // check results
+  float result = std::accumulate(gpu_list.begin(), gpu_list.end(), 0);
+
+  for (size_t i = 0; i < dev_scopes.size(); ++i) {
+    p::CPUPlace cpu_place;
+    p::GPUPlace gpu_place(gpu_list[i]);
+
+    auto &recv_tensor = dev_scopes[i]->FindVar("rt")->Get<f::LoDTensor>();
+    auto *rt = recv_tensor.data<float>();
+    auto *result_tensor = dev_scopes[i]->Var("ct")->GetMutable<f::LoDTensor>();
+    result_tensor->Resize(kDims);
+    auto *ct = result_tensor->mutable_data<float>(cpu_place);
+
+    paddle::memory::Copy(
+        cpu_place, ct, p::GPUPlace(gpu_list[i]), rt,
+        recv_tensor.numel() * sizeof(float),
+        static_cast<p::CUDADeviceContext *>(dev_ctxs[i])->stream());
+
+    for (size_t j = 0; j < f::product(kDims); ++j) {
+      ASSERT_NEAR(ct[j], result, 1e-5);
+    }
+  }
+}
+
+// ncclAReduceOp with desc
+TEST_F(NCCLTester, ncclReduceOp) {
+  std::unique_ptr<f::OpDescBind> op2(new f::OpDescBind);
+  const int kRoot = 0;
+
+  op2->SetType("ncclReduce");
+  op2->SetInput("X", {"st"});
+  op2->SetInput("Communicator", {"comm"});
+  op2->SetOutput("Out", {"rt"});
+  op2->SetAttr("root", {kRoot});
+
+  std::vector<f::Scope *> dev_scopes;
+
+  std::vector<std::thread> ths;
+
-  // run Bcast
   for (size_t i = 0; i < gpu_list.size(); ++i) {
-    dev_scopes[i]->Var("out")->GetMutable<f::LoDTensor>();
+    dev_scopes.emplace_back(&g_scope.NewScope());
     std::thread th(&NCCLTester::PerThreadProgram<float>, this, gpu_list[i],
                    *op2.get(), dev_scopes[i]);
     ths.emplace_back(std::move(th));
@@ -342,27 +218,74 @@ TEST_F(NCCLTester, MultipleOp) {
     ths[i].join();
   }
 
-  // check results
+  // check results on
   float result = std::accumulate(gpu_list.begin(), gpu_list.end(), 0);
 
-  for (size_t i = 0; i < dev_scopes.size(); ++i) {
   p::CPUPlace cpu_place;
-  p::GPUPlace gpu_place(gpu_list[i]);
+  p::GPUPlace gpu_place(gpu_list[kRoot]);
 
-  auto &recv_tensor = dev_scopes[i]->FindVar("rt")->Get<f::LoDTensor>();
+  auto &recv_tensor = dev_scopes[kRoot]->FindVar("rt")->Get<f::LoDTensor>();
   auto *rt = recv_tensor.data<float>();
-  auto *result_tensor = dev_scopes[i]->Var("ct")->GetMutable<f::LoDTensor>();
+  auto *result_tensor = dev_scopes[kRoot]->Var("ct")->GetMutable<f::LoDTensor>();
   result_tensor->Resize(kDims);
   auto *ct = result_tensor->mutable_data<float>(cpu_place);
 
   paddle::memory::Copy(
-      cpu_place, ct, p::GPUPlace(gpu_list[i]), rt,
+      cpu_place, ct, p::GPUPlace(gpu_list[kRoot]), rt,
       recv_tensor.numel() * sizeof(float),
-      static_cast<p::CUDADeviceContext *>(dev_ctxs[i])->stream());
+      static_cast<p::CUDADeviceContext *>(dev_ctxs[kRoot])->stream());
 
   for (int j = 0; j < f::product(kDims); ++j) {
     ASSERT_NEAR(ct[j], result, 1e-5);
   }
-  }
 }
 
+// // ncclBcastOp with desc
+TEST_F(NCCLTester, ncclBcastOp) {
+  std::unique_ptr<f::OpDescBind> op2(new f::OpDescBind);
+  const int kRoot = 5;
+
+  op2->SetType("ncclBcast");
+  op2->SetInput("X", {"st"});
+  op2->SetInput("Communicator", {"comm"});
+  op2->SetOutput("Out", {"rt"});
+  op2->SetAttr("root", {kRoot});
+
+  std::vector<f::Scope *> dev_scopes;
+
+  std::vector<std::thread> ths;
+
+  for (size_t i = 0; i < gpu_list.size(); ++i) {
+    dev_scopes.emplace_back(&g_scope.NewScope());
+    std::thread th(&NCCLTester::PerThreadProgram<float>, this, gpu_list[i],
+                   *op2.get(), dev_scopes[i]);
+    ths.emplace_back(std::move(th));
+  }
+
+  for (size_t i = 0; i < gpu_list.size(); ++i) {
+    ths[i].join();
+  }
+
+  const int idx = 1;
+  // check results on
+  float result = kRoot;
+
+  p::CPUPlace cpu_place;
+  p::GPUPlace gpu_place(gpu_list[idx]);
+
+  auto &recv_tensor = dev_scopes[idx]->FindVar("rt")->Get<f::LoDTensor>();
+  auto *rt = recv_tensor.data<float>();
+  auto *result_tensor = dev_scopes[idx]->Var("ct")->GetMutable<f::LoDTensor>();
+  result_tensor->Resize(kDims);
+  auto *ct = result_tensor->mutable_data<float>(cpu_place);
+
+  paddle::memory::Copy(
+      cpu_place, ct, p::GPUPlace(gpu_list[idx]), rt,
+      recv_tensor.numel() * sizeof(float),
+      static_cast<p::CUDADeviceContext *>(dev_ctxs[idx])->stream());
+
+  for (size_t j = 0; j < f::product(kDims); ++j) {
+    ASSERT_NEAR(ct[j], result, 1e-5);
+  }
+}
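Note on the expected values: each check above is simple arithmetic over the GPU ids, assuming PerThreadProgram fills each device's send tensor "st" with its GPU id (consistent with result = std::accumulate(gpu_list.begin(), gpu_list.end(), 0) in the tests). A small Python model of that arithmetic (illustrative only, not part of the commit):

    gpu_list = [0, 1, 2, 3, 4, 5, 6]   # hypothetical device ids

    # ncclAllReduce: every device ends up with the sum of all send buffers.
    allreduce_expected = sum(gpu_list)   # checked on every dev_scope

    # ncclReduce: only the root (kRoot == 0) holds the sum.
    reduce_expected = sum(gpu_list)      # checked at dev_scopes[kRoot]

    # ncclBcast: every device receives the root's send value (kRoot == 5),
    # so the test checks result == kRoot at a non-root index (idx == 1).
    bcast_expected = gpu_list[5]         # == 5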
python/paddle/v2/framework/tests/test_nccl_allreduce_op.py
deleted (file mode 100644 → 0)
import unittest, os
from threading import Thread
import numpy as np
import paddle.v2 as paddle
from paddle.v2.framework.op import Operator
import paddle.v2.framework.core as core
from op_test import OpTest, create_op, set_input

# gpu_list = os.environ["NV_LIST"]
gpu_list = "0,1,2,3"

if not core.is_compile_gpu() or not gpu_list:
    exit(0)

g_scope = core.Scope()
g_ctx = core.DeviceContext.create(core.CPUPlace())
gpus = [int(g) for g in gpu_list.split(",")]


# ground truth
def allreduce(tensors, gpus):
    num_device = len(gpus)
    assert (len(tensors) == num_device), "not match of tensor and device"
    Out = tensors
    for i in range(1, len(tensors)):
        Out[0] += Out[i]

    for i in range(1, len(tensors)):
        Out[i] = Out[0]

    return Out
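For reference, allreduce mutates its inputs in place and returns them; afterwards every entry equals the elementwise sum of all inputs. A minimal sanity check of this ground-truth helper (an illustrative snippet, not part of the original file):

    import numpy as np

    # Four fake "device" tensors holding 1s, 2s, 3s, 4s.
    xs = [np.full((2, 2), i + 1, dtype="float32") for i in range(4)]
    out = allreduce(xs, gpus=[0, 1, 2, 3])
    for o in out:
        # Every output equals the elementwise sum: 1 + 2 + 3 + 4 == 10.
        assert np.allclose(o, np.full((2, 2), 10.0))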
input_data = [
    np.random.random((32, 32)).astype("float32") for i in range(len(gpus))
]
output_data = allreduce(input_data, gpus)


def thread_allreduce_op(thread_id, gpu_id):
    i = gpu_id
    scope = g_scope.new_scope()
    place = core.GPUPlace(gpus[i])
    inputs = {
        "X": input_data[i],
        "Communicator": scope.find_var("Communicator")
    }
    outputs = {"Out": output_data[i]}

    op = create_op(scope, "ncclAllReduce", inputs, outputs, attrs={})
    place = core.GPUPlace(gpus[i])
    set_input(scope, op, inputs, place)

    ctx = core.DeviceContext.create(place)

    print "thread_id : ", thread_id, "gpu_id : ", gpu_id, " invoke allreduce"
    op.run(scope, ctx)
    print "thread_id : ", thread_id, "gpu_id : ", gpu_id, " allreduce Done."


class TestNCCLAllReduce(unittest.TestCase):
    def setUp(self):
        self.op_type = "ncclAllReduce"

        nccl_init = create_op(
            g_scope,
            op_type="ncclInit",
            inputs={},
            outputs={
                "Communicator": g_scope.var("Communicator").get_communicator()
            },
            attrs={"gpus": gpus})
        nccl_init.run(g_scope, g_ctx)

    def test_output(self):
        ops = []
        for i in range(len(gpus)):
            th = Thread(target=thread_allreduce_op, args=(i, gpus[i], ))
            th.start()
            ops.append(th)
        for t in ops:
            t.join()

        idx = 0
        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            actual = np.array(g_scope.find_var(out_name).get_tensor())
            expect = output_data[idx]
            idx += 1
            self.assertTrue(actual, expect), "has diff"


if __name__ == "__main__":
    unittest.main()
python/paddle/v2/framework/tests/test_nccl_reduce_op.py
deleted (file mode 100644 → 0)
import unittest, os
import numpy as np
import paddle.v2 as paddle
from paddle.v2.framework.op import Operator
import paddle.v2.framework.core as core
from op_test import OpTest, create_op, set_input

gpu_list = "0,1,2,3"
g_scope = core.Scope()
g_ctx = core.DeviceContext.create(core.CPUPlace())

if not core.is_compile_gpu() or not gpu_list:
    exit(0)


class TestNCCLReduce(OpTest):
    def setUp(self):
        self.op_type = "ncclReduce"
        self.gpus = [int(g) for g in gpu_list.split(",")]

        self.scope = g_scope.var("Communicator").get_communicator()
        self.outputs = {"Communicator": self.scope.var("Communicator")}

    def test_check_output(self):
        self.check_output()
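As deleted, TestNCCLReduce only wires up the Communicator and never runs a ncclReduce op, so it carried no ground-truth check; the C++ ncclReduceOp test above verifies the summed result on the root GPU only. A hypothetical Python ground truth mirroring that behavior (illustrative; the helper name is assumed, not from the original file):

    import numpy as np

    def reduce_ground_truth(tensors, root=0):
        # ncclReduce deposits the elementwise sum only on the root device;
        # the other outputs are left untouched.
        out = list(tensors)
        out[root] = np.sum(np.stack(tensors), axis=0)
        return out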