Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
0446b488
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
0446b488
编写于
2月 16, 2017
作者:
L
liaogang
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
LayerOutput for single machine multiple devices
上级
b9dfe8e7
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
45 additions
and
1 deletion
+45
-1
paddle/gserver/gradientmachines/GradientMachine.h
paddle/gserver/gradientmachines/GradientMachine.h
+2
-0
paddle/gserver/gradientmachines/MultiGradientMachine.cpp
paddle/gserver/gradientmachines/MultiGradientMachine.cpp
+38
-0
paddle/gserver/gradientmachines/MultiGradientMachine.h
paddle/gserver/gradientmachines/MultiGradientMachine.h
+2
-0
paddle/gserver/gradientmachines/NeuralNetwork.cpp
paddle/gserver/gradientmachines/NeuralNetwork.cpp
+1
-0
paddle/gserver/gradientmachines/NeuralNetwork.h
paddle/gserver/gradientmachines/NeuralNetwork.h
+2
-1
未找到文件。
paddle/gserver/gradientmachines/GradientMachine.h
浏览文件 @
0446b488
...
@@ -134,6 +134,8 @@ public:
...
@@ -134,6 +134,8 @@ public:
backward
(
callback
);
backward
(
callback
);
}
}
virtual
MatrixPtr
getLayerOutput
(
const
std
::
string
&
layerName
)
=
0
;
// see comment in Layer.h for the function with the same name
// see comment in Layer.h for the function with the same name
virtual
void
resetState
()
{}
virtual
void
resetState
()
{}
...
...
paddle/gserver/gradientmachines/MultiGradientMachine.cpp
浏览文件 @
0446b488
...
@@ -282,6 +282,44 @@ void MultiGradientMachine::forwardBackward(const std::vector<Argument>& inArgs,
...
@@ -282,6 +282,44 @@ void MultiGradientMachine::forwardBackward(const std::vector<Argument>& inArgs,
backwardImp
(
callback
);
backwardImp
(
callback
);
}
}
MatrixPtr MultiGradientMachine::getLayerOutput(const std::string& layerName) {
  // Gather the named layer's output from every trainer thread into a single
  // matrix. Each thread runs an identical copy of the network on its slice
  // of the batch, so the merged output height is the per-thread output
  // height multiplied by the number of trainer threads.
  auto nn = dynamic_cast<NeuralNetwork*>(threads_[0]->getGradientMachine());
  CHECK(nn) << "gradient machine is not a NeuralNetwork";
  auto height = nn->getLayerOutput(layerName)->getHeight() * threads_.size();
  auto stream = HPPL_STREAM_DEFAULT;

  // Copy one thread's output `src` into rows
  // [startRow, startRow + src->getHeight()) of the merged matrix `dst`,
  // allocating `dst` lazily on first use. `src` is taken by const reference
  // to avoid a shared_ptr refcount bump per call, and `startRow` is size_t
  // to match the caller's accumulator (the original took a narrowing int).
  auto copyLayerOutput = [height, stream](MatrixPtr& dst,
                                          const MatrixPtr& src,
                                          size_t startRow,
                                          bool useGpu) {
    size_t width = src->getWidth();
    if (!dst) {
      dst = src->clone(height, width, useGpu);
    } else {
      dst->resize(height, width);
    }
    MatrixPtr tmpMatrix = dst->subMatrix(startRow, src->getHeight());
    // On GPU this copy is asynchronous on `stream`; the caller synchronizes
    // once after all per-thread copies have been issued.
    tmpMatrix->copyFrom(*src, stream);
  };

  MatrixPtr mats;
  size_t startRow = 0;
  // Copy the layer output of one trainer thread at a time.
  for (auto& thread : threads_) {
    auto threadNet =
        dynamic_cast<NeuralNetwork*>(thread->getGradientMachine());
    CHECK(threadNet) << "gradient machine is not a NeuralNetwork";
    auto mat = threadNet->getLayerOutput(layerName);
    copyLayerOutput(mats, mat, startRow, useGpu_);
    startRow += mat->getHeight();
  }

  if (useGpu_) {
    // Wait for all asynchronous device copies issued above to complete.
    // Use the same `stream` the copies were enqueued on, rather than
    // repeating the HPPL_STREAM_DEFAULT literal.
    hl_stream_synchronize(stream);
  }
  return mats;
}
void
MultiGradientMachine
::
backwardImp
(
const
UpdateCallback
&
callback
)
{
void
MultiGradientMachine
::
backwardImp
(
const
UpdateCallback
&
callback
)
{
for
(
size_t
i
=
0
;
i
<
parameters_
.
size
();
i
++
)
{
for
(
size_t
i
=
0
;
i
<
parameters_
.
size
();
i
++
)
{
if
(
!
parameters_
[
i
]
->
useGpu
()
||
parameters_
[
i
]
->
isStatic
())
continue
;
if
(
!
parameters_
[
i
]
->
useGpu
()
||
parameters_
[
i
]
->
isStatic
())
continue
;
...
...
paddle/gserver/gradientmachines/MultiGradientMachine.h
浏览文件 @
0446b488
...
@@ -189,6 +189,8 @@ public:
...
@@ -189,6 +189,8 @@ public:
PassType
passType
,
PassType
passType
,
const
UpdateCallback
&
callback
);
const
UpdateCallback
&
callback
);
virtual
MatrixPtr
getLayerOutput
(
const
std
::
string
&
layerName
);
virtual
void
onPassEnd
();
virtual
void
onPassEnd
();
virtual
void
finish
();
virtual
void
finish
();
...
...
paddle/gserver/gradientmachines/NeuralNetwork.cpp
浏览文件 @
0446b488
...
@@ -298,6 +298,7 @@ MatrixPtr NeuralNetwork::getLayerOutput(const std::string& layerName) {
...
@@ -298,6 +298,7 @@ MatrixPtr NeuralNetwork::getLayerOutput(const std::string& layerName) {
CHECK
(
it
!=
layerMap_
.
end
())
<<
"Cannot find layer: "
<<
layerName
;
CHECK
(
it
!=
layerMap_
.
end
())
<<
"Cannot find layer: "
<<
layerName
;
return
it
->
second
->
getOutputValue
();
return
it
->
second
->
getOutputValue
();
}
}
void
NeuralNetwork
::
onPassEnd
()
{
void
NeuralNetwork
::
onPassEnd
()
{
for
(
auto
&
layer
:
layers_
)
{
for
(
auto
&
layer
:
layers_
)
{
layer
->
onPassEnd
();
layer
->
onPassEnd
();
...
...
paddle/gserver/gradientmachines/NeuralNetwork.h
浏览文件 @
0446b488
...
@@ -87,7 +87,8 @@ public:
...
@@ -87,7 +87,8 @@ public:
virtual
void
backward
(
const
UpdateCallback
&
callback
=
nullptr
);
virtual
void
backward
(
const
UpdateCallback
&
callback
=
nullptr
);
MatrixPtr
getLayerOutput
(
const
std
::
string
&
layerName
);
virtual
MatrixPtr
getLayerOutput
(
const
std
::
string
&
layerName
);
const
LayerPtr
&
getLayer
(
const
std
::
string
&
layerName
)
const
{
const
LayerPtr
&
getLayer
(
const
std
::
string
&
layerName
)
const
{
auto
it
=
layerMap_
.
find
(
layerName
);
auto
it
=
layerMap_
.
find
(
layerName
);
CHECK
(
it
!=
layerMap_
.
end
())
<<
"Unknown layer "
<<
layerName
;
CHECK
(
it
!=
layerMap_
.
end
())
<<
"Unknown layer "
<<
layerName
;
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录