Commit e26c6d78
Authored April 11, 2018 by chengduoZH

code refine

Parent: ae5923e2
Showing 9 changed files with 83 additions and 100 deletions (+83 −100)
paddle/fluid/framework/details/CMakeLists.txt               +5   −5
paddle/fluid/framework/details/broadcast_op_handle.cc       +10  −19
paddle/fluid/framework/details/broadcast_op_handle.h        +0   −4
paddle/fluid/framework/details/broadcast_op_handle_test.cc  +13  −5
paddle/fluid/framework/details/gather_op_handle.cc          +20  −19
paddle/fluid/framework/details/gather_op_handle.h           +0   −4
paddle/fluid/framework/details/gather_op_handle_test.cc     +12  −44
paddle/fluid/framework/details/op_handle_base.cc            +15  −0
paddle/fluid/framework/details/op_handle_base.h             +8   −0
paddle/fluid/framework/details/CMakeLists.txt
 cc_library(var_handle SRCS var_handle.cc DEPS place)
-cc_library(op_handle_base SRCS op_handle_base.cc DEPS var_handle device_context)
+cc_library(op_handle_base SRCS op_handle_base.cc DEPS var_handle device_context lod_tensor)
 cc_library(scale_loss_grad_op_handle SRCS scale_loss_grad_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory)
 cc_library(fetch_op_handle SRCS fetch_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory)
 nv_library(nccl_all_reduce_op_handle SRCS nccl_all_reduce_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory
...
@@ -21,10 +21,10 @@ cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS ssa_graph framewor
 cc_library(threaded_ssa_graph_executor SRCS threaded_ssa_graph_executor.cc DEPS fetch_op_handle ssa_graph_executor scope
            simple_threadpool device_context)
-cc_library(broadcast_op_handle SRCS broadcast_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory)
-cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory)
+cc_library(broadcast_op_handle SRCS broadcast_op_handle.cc DEPS op_handle_base scope ddim memory)
+cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope ddim memory)

-cc_test(broadcast_op_test SRCS broadcast_op_handle_test.cc DEPS var_handle op_handle_base scope lod_tensor ddim memory
+cc_test(broadcast_op_test SRCS broadcast_op_handle_test.cc DEPS var_handle op_handle_base scope ddim memory
         device_context broadcast_op_handle)
-cc_test(gather_op_test SRCS gather_op_handle_test.cc DEPS var_handle op_handle_base scope lod_tensor ddim memory
+cc_test(gather_op_test SRCS gather_op_handle_test.cc DEPS var_handle op_handle_base scope ddim memory
         device_context gather_op_handle)
paddle/fluid/framework/details/broadcast_op_handle.cc
...
...
@@ -18,23 +18,16 @@ namespace paddle {
 namespace framework {
 namespace details {

-static Tensor *GetTensorFromVar(Variable *in_var) {
-  if (in_var->IsType<LoDTensor>()) {
-    return in_var->GetMutable<LoDTensor>();
-  } else if (in_var->IsType<SelectedRows>()) {
-    return in_var->GetMutable<SelectedRows>()->mutable_value();
-  } else {
-    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
-  }
-  return nullptr;
-}
-
 BroadcastOpHandle::BroadcastOpHandle(const std::vector<Scope *> &local_scopes,
                                      const std::vector<platform::Place> &places)
     : local_scopes_(local_scopes), places_(places) {}

 void BroadcastOpHandle::RunImpl() {
-  PADDLE_ENFORCE_EQ(this->inputs_.size(), 1);
-  PADDLE_ENFORCE_EQ(this->outputs_.size(), places_.size());
+  PADDLE_ENFORCE_EQ(this->inputs_.size(), 1,
+                    "The number of input should be one.");
+  PADDLE_ENFORCE_EQ(this->outputs_.size(), places_.size(),
+                    "The number of output should equal to the number of places.");

   // Wait input done, this Wait is asynchronous operation
   auto in_var_handle = static_cast<VarHandle *>(this->inputs_[0]);
...
...
@@ -43,7 +36,9 @@ void BroadcastOpHandle::RunImpl() {
   inputs_[0]->generated_op_->Wait(dev_ctxes_[in_place]);

   auto in_scope_idx = in_var_handle->scope_idx_;
-  PADDLE_ENFORCE_LT(in_scope_idx, local_scopes_.size(), "");
+  PADDLE_ENFORCE_LT(in_scope_idx, local_scopes_.size(),
+                    "The input(%s) is not in the local_scopes.",
+                    in_var_handle->name_);
   auto in_var = local_scopes_[in_scope_idx]->FindVar(in_var_handle->name_);
   Tensor *in_tensor = GetTensorFromVar(in_var);
...
...
@@ -56,12 +51,8 @@ void BroadcastOpHandle::RunImpl() {
                       "%s is not the the local_scopes ", out_handle->name_);

     auto *s = local_scopes_[out_scope_idx];
     auto out_var = s->FindVar(out_handle->name_);
-    PADDLE_ENFORCE_EQ(out_var->Type(), in_var->Type(),
-                      "The type of input and output is not equal. (%s_%d vs %s_%d)",
-                      out_handle->name_, out_handle->scope_idx_,
-                      in_var_handle->name_, in_var_handle->scope_idx_);
+    PADDLE_ENFORCE_EQ(out_p.which(), in_place.which(),
+                      "The place of input and output should be the same.");

     if (in_var->IsType<framework::SelectedRows>()) {
       auto &in_sr = in_var->Get<framework::SelectedRows>();
...
...
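For orientation, a minimal usage sketch of the handle changed above. Only the BroadcastOpHandle(local_scopes, places) constructor and the Run(false) call are taken from this commit (the constructor from the diff, the call pattern from the test files); the scope and place setup is a hypothetical stand-in modeled on the test fixture, not code from this change.

namespace f = paddle::framework;
namespace p = paddle::platform;

// Hypothetical setup, roughly what broadcast_op_handle_test.cc builds:
f::Scope g_scope;
std::vector<f::Scope *> local_scopes;
std::vector<p::Place> places = {p::CPUPlace(), p::CPUPlace()};
for (size_t i = 0; i < places.size(); ++i) {
  local_scopes.push_back(&g_scope.NewScope());
}
f::details::BroadcastOpHandle broadcast_op(local_scopes, places);
// The caller still has to attach the input/output VarHandles and the
// per-place device contexts (inputs_, outputs_, dev_ctxes_) before running,
// exactly as the test fixture does.
broadcast_op.Run(false);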
paddle/fluid/framework/details/broadcast_op_handle.h
...
...
@@ -28,10 +28,6 @@ namespace paddle {
 namespace framework {
 namespace details {

-/*
- * Broadcast the input to all scope.
- *
- */
 struct BroadcastOpHandle : public OpHandleBase {
   const std::vector<Scope *> &local_scopes_;
   const std::vector<platform::Place> &places_;
...
...
paddle/fluid/framework/details/broadcast_op_handle_test.cc
...
...
@@ -17,6 +17,10 @@
 #include "paddle/fluid/platform/device_context.h"

+namespace paddle {
+namespace framework {
+namespace details {
+
 namespace f = paddle::framework;
 namespace p = paddle::platform;
...
...
@@ -25,7 +29,7 @@ const f::DDim kDims = {20, 20};
 class BroadcastTester : public ::testing::Test {
  public:
-  void InitCtx(bool use_gpu) {
+  void InitCtxOnGpu(bool use_gpu) {
     if (use_gpu) {
 #ifdef PADDLE_WITH_CUDA
       int count = p::GetCUDADeviceCount();
...
...
@@ -200,23 +204,27 @@ class BroadcastTester : public ::testing::Test {
 };

 TEST_F(BroadcastTester, TestCPUBroadcastTestLodTensor) {
-  InitCtx(false);
+  InitCtxOnGpu(false);
   TestBroadcastLodTensor();
 }

 TEST_F(BroadcastTester, TestCPUBroadcastTestSelectedRows) {
-  InitCtx(false);
+  InitCtxOnGpu(false);
   TestBroadcastSelectedRows();
 }

 #ifdef PADDLE_WITH_CUDA
 TEST_F(BroadcastTester, TestGPUBroadcastTestLodTensor) {
-  InitCtx(true);
+  InitCtxOnGpu(true);
   TestBroadcastLodTensor();
 }

 TEST_F(BroadcastTester, TestGPUBroadcastTestSelectedRows) {
-  InitCtx(true);
+  InitCtxOnGpu(true);
   TestBroadcastSelectedRows();
 }
 #endif
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
paddle/fluid/framework/details/gather_op_handle.cc
...
...
@@ -18,23 +18,16 @@ namespace paddle {
 namespace framework {
 namespace details {

-static Tensor *GetTensorFromVar(Variable *in_var) {
-  if (in_var->IsType<LoDTensor>()) {
-    return in_var->GetMutable<LoDTensor>();
-  } else if (in_var->IsType<SelectedRows>()) {
-    return in_var->GetMutable<SelectedRows>()->mutable_value();
-  } else {
-    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
-  }
-  return nullptr;
-}
-
 GatherOpHandle::GatherOpHandle(const std::vector<Scope *> &local_scopes,
                                const std::vector<platform::Place> &places)
     : local_scopes_(local_scopes), places_(places) {}

 void GatherOpHandle::RunImpl() {
-  PADDLE_ENFORCE_EQ(this->inputs_.size(), places_.size());
-  PADDLE_ENFORCE_EQ(this->outputs_.size(), 1);
+  PADDLE_ENFORCE_EQ(this->inputs_.size(), places_.size(),
+                    "The number of inputs should be equal to the number of place.");
+  PADDLE_ENFORCE_EQ(this->outputs_.size(), 1,
+                    "The number of output should be one.");

   // Wait input done, this Wait is asynchronous operation
   for (auto *in : inputs_) {
...
...
@@ -46,6 +39,7 @@ void GatherOpHandle::RunImpl() {
   auto in_0_handle = static_cast<VarHandle *>(inputs_[0]);
   auto pre_in_var =
       local_scopes_[in_0_handle->scope_idx_]->FindVar(in_0_handle->name_);
+  auto pre_place = in_0_handle->place_;

   std::vector<int64_t> out_rows;
   std::vector<Tensor *> in_tensors;
...
...
@@ -58,7 +52,8 @@ void GatherOpHandle::RunImpl() {
     in_places.push_back(in_p);
     PADDLE_ENFORCE_LT(in_handle->scope_idx_, local_scopes_.size(),
                       "%s is not the the local_scopes ", in_handle->name_);
+    PADDLE_ENFORCE_EQ(in_p.which(), pre_place.which(),
+                      "The place of input should be the same.");

     auto *s = local_scopes_[in_handle->scope_idx_];
     auto in_var = s->FindVar(in_handle->name_);
     PADDLE_ENFORCE_EQ(in_var->Type(), pre_in_var->Type(),
...
...
@@ -69,13 +64,17 @@ void GatherOpHandle::RunImpl() {
       auto &in_sr = in_var->Get<framework::SelectedRows>();
       auto in_sr_rows = in_sr.rows();
       out_rows.insert(out_rows.begin(), in_sr_rows.begin(), in_sr_rows.end());
-      PADDLE_ENFORCE_EQ(pre_in.height(), in_sr.height(), "");
-      PADDLE_ENFORCE_EQ(pre_in.GetCompleteDims(), in_sr.GetCompleteDims(), "");
+      PADDLE_ENFORCE_EQ(pre_in.height(), in_sr.height(),
+                        "The height of inputs is not consistent.");
+      PADDLE_ENFORCE_EQ(pre_in.GetCompleteDims(), in_sr.GetCompleteDims(),
+                        "The dims of inputs is not consistent.");
     } else if (in_var->IsType<framework::LoDTensor>()) {
       auto &pre_in = pre_in_var->Get<framework::LoDTensor>();
       auto &in_lodtensor = in_var->Get<framework::LoDTensor>();
-      PADDLE_ENFORCE_EQ(in_lodtensor.lod(), pre_in.lod());
-      PADDLE_ENFORCE_EQ(in_lodtensor.dims(), pre_in.dims());
+      PADDLE_ENFORCE_EQ(in_lodtensor.lod(), pre_in.lod(),
+                        "The lod of inputs is not consistent.");
+      PADDLE_ENFORCE_EQ(in_lodtensor.dims(), pre_in.dims(),
+                        "The dims of inputs is not consistent.");
     } else {
       PADDLE_THROW("Var should be LoDTensor or SelectedRows.");
     }
...
...
@@ -88,7 +87,8 @@ void GatherOpHandle::RunImpl() {
   auto &out_place = out_handle->place_;
   auto out_scope_idx = out_handle->scope_idx_;
   auto out_var = local_scopes_[out_scope_idx]->FindVar(out_handle->name_);
+  PADDLE_ENFORCE_EQ(out_place.which(), pre_place.which(),
+                    "The place of input and output should be the same.");

   if (pre_in_var->IsType<framework::SelectedRows>()) {
     auto &pre_in = pre_in_var->Get<framework::SelectedRows>();
     auto out = out_var->GetMutable<framework::SelectedRows>();
...
...
@@ -110,12 +110,13 @@ void GatherOpHandle::RunImpl() {
       s = e;
     }
   } else if (pre_in_var->IsType<framework::LoDTensor>()) {
     // gather LoDTensor ???
   } else {
     PADDLE_THROW("Var should be LoDTensor or SelectedRows.");
   }
 }

-std::string GatherOpHandle::Name() const { return "broadcast"; }
+std::string GatherOpHandle::Name() const { return "gather"; }

 }  // namespace details
 }  // namespace framework
 }  // namespace paddle
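A side note on the SelectedRows branch above: gathering splices every input's row index list into one output row list, after checking that all inputs agree on height and complete dims (the new messages make those checks report what went wrong). Below is a standalone sketch of just that row merge, using plain std::vector instead of Paddle's SelectedRows; GatherRows and input_rows are illustrative names, not identifiers from this commit, and the handle itself inserts at out_rows.begin() while the sketch appends, which only changes the ordering.

#include <cstdint>
#include <vector>

// Concatenate the row indices of several SelectedRows-like inputs into one
// gathered row list (illustration only).
std::vector<int64_t> GatherRows(
    const std::vector<std::vector<int64_t>> &input_rows) {
  std::vector<int64_t> out_rows;
  for (const auto &rows : input_rows) {
    out_rows.insert(out_rows.end(), rows.begin(), rows.end());
  }
  return out_rows;
}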
paddle/fluid/framework/details/gather_op_handle.h
...
...
@@ -28,10 +28,6 @@ namespace paddle {
 namespace framework {
 namespace details {

-/*
- * Broadcast the input to all scope.
- *
- */
 struct GatherOpHandle : public OpHandleBase {
   const std::vector<Scope *> &local_scopes_;
   const std::vector<platform::Place> &places_;
...
...
paddle/fluid/framework/details/gather_op_handle_test.cc
...
...
@@ -17,6 +17,9 @@
 #include "paddle/fluid/platform/device_context.h"

+namespace paddle {
+namespace framework {
+namespace details {
 namespace f = paddle::framework;
 namespace p = paddle::platform;
...
...
@@ -25,7 +28,7 @@ const f::DDim kDims = {20, 20};
 class GatherTester : public ::testing::Test {
  public:
-  void InitCtx(bool use_gpu) {
+  void InitCtxOnGpu(bool use_gpu) {
     if (use_gpu) {
 #ifdef PADDLE_WITH_CUDA
       int count = p::GetCUDADeviceCount();
...
...
@@ -103,45 +106,7 @@ class GatherTester : public ::testing::Test {
     }
   }

-  void TestGatherLodTensor() {
-    // int input_scope_idx = 0;
-    // InitGatherOp<f::LoDTensor>(input_scope_idx);
-    //
-    // auto in_var = local_scope_[input_scope_idx]->Var("input");
-    // auto in_lod_tensor = in_var->GetMutable<f::LoDTensor>();
-    // in_lod_tensor->mutable_data<float>(kDims, gpu_list_[input_scope_idx]);
-    //
-    // std::vector<float> send_vector(f::product(kDims), input_scope_idx + 12);
-    // for (size_t k = 0; k < send_vector.size(); ++k) {
-    //   send_vector[k] = k;
-    // }
-    // f::LoD lod{{0, 10, 20}};
-    // paddle::framework::TensorFromVector<float>(
-    //     send_vector, *(ctxs_[input_scope_idx]), in_lod_tensor);
-    // in_lod_tensor->set_lod(lod);
-    //
-    // gather_op_handle_->Run(false);
-    //
-    // WaitAll();
-    //
-    // p::CPUPlace cpu_place;
-    // for (size_t j = 0; j < gpu_list_.size(); ++j) {
-    //   auto out_var = local_scope_[j]->Var("out");
-    //   auto out_tensor = out_var->Get<f::LoDTensor>();
-    //   PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal.");
-    //
-    //   f::Tensor result_tensor;
-    //   f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor);
-    //   float* ct = result_tensor.mutable_data<float>(cpu_place);
-    //
-    //   for (int64_t j = 0; j < f::product(kDims); ++j) {
-    //     ASSERT_NEAR(ct[j], send_vector[j], 1e-5);
-    //   }
-    // }
-    //
-    // GatherOpDestroy();
-  }
+  void TestGatherLodTensor() {}

   void TestGatherSelectedRows() {
     int output_scope_idx = 0;
...
...
@@ -205,23 +170,26 @@ class GatherTester : public ::testing::Test {
 };

 // TEST_F(GatherTester, TestCPUGatherTestLodTensor) {
-//   InitCtx(false);
+//   InitCtxOnGpu(false);
 //   TestGatherLodTensor();
 //}

 TEST_F(GatherTester, TestCPUGatherTestSelectedRows) {
-  InitCtx(false);
+  InitCtxOnGpu(false);
   TestGatherSelectedRows();
 }

 #ifdef PADDLE_WITH_CUDA
 // TEST_F(GatherTester, TestGPUGatherTestLodTensor) {
-//   InitCtx(true);
+//   InitCtxOnGpu(true);
 //   TestGatherLodTensor();
 //}

 TEST_F(GatherTester, TestGPUGatherTestSelectedRows) {
-  InitCtx(true);
+  InitCtxOnGpu(true);
   TestGatherSelectedRows();
 }
 #endif
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
paddle/fluid/framework/details/op_handle_base.cc
...
...
@@ -17,6 +17,21 @@
 namespace paddle {
 namespace framework {
 namespace details {

+// GetTensorFromVar is used in broadcast_op handle and gather_op handle, so it
+// should be placed in a commonplace. I don't find an appropriate place, so I
+// temporarily place it in op_handle_base.
+Tensor *GetTensorFromVar(Variable *in_var) {
+  if (in_var->IsType<LoDTensor>()) {
+    return in_var->GetMutable<LoDTensor>();
+  } else if (in_var->IsType<SelectedRows>()) {
+    return in_var->GetMutable<SelectedRows>()->mutable_value();
+  } else {
+    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
+  }
+  return nullptr;
+}
+
 std::string OpHandleBase::DebugString() const {
   std::stringstream ss;
   ss << "(";
...
...
paddle/fluid/framework/details/op_handle_base.h
...
...
@@ -17,6 +17,9 @@
 #include <vector>
 #include "paddle/fluid/framework/details/var_handle.h"
+#include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/selected_rows.h"
+#include "paddle/fluid/framework/variable.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/macros.h"
...
...
@@ -24,6 +27,11 @@ namespace paddle {
 namespace framework {
 namespace details {

+// GetTensorFromVar is used in broadcast_op handle and gather_op handle, so it
+// should be placed in a commonplace. I don't find an appropriate place, so I
+// temporarily place it in op_handle.
+Tensor *GetTensorFromVar(Variable *in_var);
+
 class OpHandleBase {
  private:
   DISABLE_COPY_AND_ASSIGN(OpHandleBase);
...
...
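With the declaration above, both op handles can fetch the underlying tensor through one shared call instead of keeping private static copies. A hedged call-site sketch follows; scope and var_name are placeholders, not identifiers from this diff.

// Works whether the variable holds a LoDTensor or a SelectedRows; in the
// SelectedRows case the returned pointer is the rows' value tensor.
paddle::framework::Variable *var = scope->FindVar(var_name);
paddle::framework::Tensor *tensor =
    paddle::framework::details::GetTensorFromVar(var);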