Commit fbf86436

Authored Feb 22, 2017 by liaogang

Update python getLayerOutputs

Parent: f846e8fe

Showing 13 changed files with 35 additions and 46 deletions (+35 / -46)
demo/image_classification/prediction.py                     +1 -1
demo/model_zoo/resnet/classify.py                           +1 -1
paddle/api/Arguments.cpp                                    +7 -0
paddle/api/GradientMachine.cpp                              +2 -3
paddle/api/PaddleAPI.h                                      +3 -2
paddle/api/Trainer.cpp                                      +3 -5
paddle/gserver/gradientmachines/GradientMachine.h           +2 -2
paddle/gserver/gradientmachines/MultiGradientMachine.cpp    +8 -24
paddle/gserver/gradientmachines/MultiGradientMachine.h      +3 -1
paddle/gserver/gradientmachines/NeuralNetwork.cpp           +2 -4
paddle/gserver/gradientmachines/NeuralNetwork.h             +1 -1
paddle/gserver/layers/CosSimLayer.cpp                       +1 -1
paddle/py_paddle/util.py                                    +1 -1
demo/image_classification/prediction.py
@@ -126,7 +126,7 @@ class ImageClassifier():
         # For oversampling, average predictions across crops.
         # If not, the shape of output[name]: (1, class_number),
         # the mean is also applicable.
-        return output[output_layer].mean(0)
+        return output[output_layer]['value'].mean(0)
 
     def predict(self, image=None, output_layer=None):
         assert isinstance(image, basestring)
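The demo change reflects that each entry of output is now a per-layer dict rather than a bare numpy matrix, hence the extra ['value'] index. A minimal sketch of the new access pattern (the layer name, shapes, and the exact {'value', 'id'} layout are assumptions inferred from this commit, not taken from the demo itself):

    import numpy as np

    # Hypothetical post-commit shape: each layer maps to a dict whose 'value'
    # entry holds the activations, e.g. (num_crops, class_number) when
    # oversampling is enabled.
    output = {"__fc_layer_0__": {"value": np.random.rand(10, 102), "id": None}}

    probs = output["__fc_layer_0__"]["value"].mean(0)  # average over 10 crops
    assert probs.shape == (102,)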
demo/model_zoo/resnet/classify.py
@@ -156,7 +156,7 @@ class ImageClassifier():
         # For oversampling, average predictions across crops.
         # If not, the shape of output[name]: (1, class_number),
         # the mean is also applicable.
-        res[name] = output[name].mean(0)
+        res[name] = output[name]['value'].mean(0)
 
         return res
paddle/api/Arguments.cpp
@@ -38,6 +38,13 @@ Arguments* Arguments::createByPaddleArgumentVector(void* ptr) {
   return args;
 }
 
+Arguments* Arguments::createByPaddleArgument(const void* ptr) {
+  auto p = (paddle::Argument*)(ptr);
+  auto args = new Arguments();
+  args->m->outputs.push_back(*p);
+  return args;
+}
+
 Matrix* Arguments::getSlotValue(size_t idx) const throw(RangeError) {
   auto& a = m->getArg(idx);
   return Matrix::createByPaddleMatrixPtr(&a.value);
paddle/api/GradientMachine.cpp
@@ -144,12 +144,11 @@ Parameter* GradientMachine::getParameter(size_t i) throw(RangeError) {
 
 void GradientMachine::randParameters() { m->machine->randParameters(); }
 
-Matrix* GradientMachine::getLayerOutput(const std::string& layerName) const
+Arguments* GradientMachine::getLayerOutput(const std::string& layerName) const
     throw(UnsupportError) {
   auto nn = m->machine;
   if (nn) {
-    auto mat = nn->getLayerOutput(layerName);
-    return Matrix::createByPaddleMatrixPtr(&mat);
+    return Arguments::createByPaddleArgument(&nn->getLayerOutput(layerName));
   } else {
     throw UnsupportError();
   }
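With this change the SWIG-exposed getLayerOutput returns an Arguments object instead of a Matrix. A rough Python-side sketch of unpacking the wrapped result (gm and the layer name are hypothetical; getSlotValue comes from the Arguments API shown above, while copyToNumpyMat is an assumption based on py_paddle's matrix helpers):

    from py_paddle import swig_paddle

    # gm: a swig_paddle.GradientMachine built elsewhere (e.g. from a config).
    args = gm.getLayerOutput("__fc_layer_0__")  # now Arguments, not Matrix
    mat = args.getSlotValue(0)                  # value matrix of slot 0
    activations = mat.copyToNumpyMat()          # copy into a numpy array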
paddle/api/PaddleAPI.h
@@ -454,6 +454,7 @@ public:
 private:
   static Arguments* createByPaddleArgumentVector(void* ptr);
+  static Arguments* createByPaddleArgument(const void* ptr);
   void* getInternalArgumentsPtr() const;
 
 private:
@@ -769,7 +770,7 @@ public:
   void randParameters();
 
-  Matrix* getLayerOutput(const std::string& layerName) const
+  Arguments* getLayerOutput(const std::string& layerName) const
       throw(UnsupportError);
 
   /**
@@ -952,7 +953,7 @@ public:
   Arguments* getForwardOutput();
 
-  Matrix* getLayerOutput(const std::string& layerName);
+  Arguments* getLayerOutput(const std::string& layerName);
 };
 
 /// the N-Best results generated from one input sequence.
paddle/api/Trainer.cpp
@@ -131,12 +131,10 @@ void Trainer::testOneDataBatch(size_t batchSize, const Arguments& args) {
 
 void TrainerPrivate::finishTestPeriod() { tester_->finishTestPeriod(); }
 void Trainer::finishTestPeriod() { m->finishTestPeriod(); }
 
-Matrix* Trainer::getLayerOutput(const std::string& layerName) {
-  auto nn = std::dynamic_pointer_cast<paddle::NeuralNetwork>(
-      this->m->getGradientMachine());
+Arguments* Trainer::getLayerOutput(const std::string& layerName) {
+  auto nn = this->m->getGradientMachine();
   CHECK(nn) << "trainerInternal_.getGradientMachine() is not NeuralNetwork";
-  auto m = nn->getLayerOutput(layerName);
-  return Matrix::createByPaddleMatrixPtr(&m);
+  return Arguments::createByPaddleArgument(&nn->getLayerOutput(layerName));
 }
 
 void Trainer::forwardOneBatch(size_t batchSize) {
paddle/gserver/gradientmachines/GradientMachine.h
@@ -134,8 +134,8 @@ public:
     backward(callback);
   }
 
-  virtual MatrixPtr getLayerOutput(const std::string& layerName) const {
-    return nullptr;
+  virtual const Argument& getLayerOutput(const std::string& layerName) {
+    return *((Argument*)nullptr);
   }
 
   // see comment in Layer.h for the function with the same name
paddle/gserver/gradientmachines/MultiGradientMachine.cpp
@@ -282,33 +282,17 @@ void MultiGradientMachine::forwardBackward(const std::vector<Argument>& inArgs,
   backwardImp(callback);
 }
 
-MatrixPtr MultiGradientMachine::getLayerOutput(
-    const std::string& layerName) const {
-  // each thread has the same neural network
-  auto nn = threads_[0]->getGradientMachine();
-  size_t height = 0;
-  size_t width = nn->getLayerOutput(layerName)->getWidth();
-  std::vector<MatrixPtr> mats;
-  mats.reserve(threads_.size());
+const Argument& MultiGradientMachine::getLayerOutput(
+    const std::string& layerName) {
+  std::vector<Argument> args;
+  args.reserve(threads_.size());
+
   for (auto& thread : threads_) {
-    MatrixPtr out = thread->getGradientMachine()->getLayerOutput(layerName);
-    mats.push_back(out);
-    height += out->getHeight();
-    CHECK_EQ(width, out->getWidth());
+    args.push_back(thread->getGradientMachine()->getLayerOutput(layerName));
   }
+  outLayerArgs_.concat(args, false /* use_gpu */, outArgStream_, passType_);
 
-  MatrixPtr layerOutput;
-  Matrix::resizeOrCreate(layerOutput, height, width, false, false);
-
-  // copy one layer output from one trainer thread at each time
-  size_t startRow = 0;
-  for (auto& mat : mats) {
-    auto tmpMatrix = layerOutput->subMatrix(startRow, mat->getHeight());
-    tmpMatrix->copyFrom(*mat);
-    startRow += mat->getHeight();
-  }
-
-  return layerOutput;
+  return outLayerArgs_;
 }
 
 void MultiGradientMachine::backwardImp(const UpdateCallback& callback) {
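The rewrite replaces the hand-rolled subMatrix/copyFrom stitching with Argument::concat over the per-thread outputs; for the value matrix the result is conceptually still a row-wise concatenation across trainer threads, while the Argument form can also carry metadata (such as sequence information) that a bare MatrixPtr could not, which is presumably the point of the new return type. A plain-numpy sketch of that row-stacking invariant (thread count and shapes are made up):

    import numpy as np

    # Per-thread layer outputs: equal width, heights may differ.
    thread_outputs = [np.random.rand(h, 64) for h in (32, 32, 16)]

    # Both the old copy loop and the new concat yield, for the value part,
    # the threads' rows stacked in thread order.
    merged = np.vstack(thread_outputs)
    assert merged.shape == (80, 64)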
paddle/gserver/gradientmachines/MultiGradientMachine.h
@@ -189,7 +189,7 @@ public:
                               PassType passType,
                               const UpdateCallback& callback);
 
-  virtual MatrixPtr getLayerOutput(const std::string& layerName) const;
+  virtual const Argument& getLayerOutput(const std::string& layerName);
 
   virtual void onPassEnd();
 
@@ -316,6 +316,8 @@ protected:
   std::vector<Argument> outArgs_;
   hl_stream_t outArgStream_;
 
+  Argument outLayerArgs_;
+
   /// ParameterType which needs to be merged from each GPU
   std::vector<ParameterType> mergeTypes_;
   int numDevices_; /* number of gpu devices */
paddle/gserver/gradientmachines/NeuralNetwork.cpp
@@ -293,10 +293,8 @@ void NeuralNetwork::backward(const UpdateCallback& callback) {
   }
 }
 
-MatrixPtr NeuralNetwork::getLayerOutput(const std::string& layerName) const {
-  auto it = layerMap_.find(layerName);
-  CHECK(it != layerMap_.end()) << "Cannot find layer: " << layerName;
-  return it->second->getOutputValue();
+const Argument& NeuralNetwork::getLayerOutput(const std::string& layerName) {
+  return getLayer(layerName)->getOutput();
 }
 
 void NeuralNetwork::onPassEnd() {
paddle/gserver/gradientmachines/NeuralNetwork.h
@@ -87,7 +87,7 @@ public:
 
   virtual void backward(const UpdateCallback& callback = nullptr);
 
-  virtual MatrixPtr getLayerOutput(const std::string& layerName) const;
+  virtual const Argument& getLayerOutput(const std::string& layerName);
 
   const LayerPtr& getLayer(const std::string& layerName) const {
     auto it = layerMap_.find(layerName);
paddle/gserver/layers/CosSimLayer.cpp
@@ -68,7 +68,7 @@ void CosSimLayer::forward(PassType passType) {
 void CosSimLayer::backward(const UpdateCallback& callback) {
   /* activation */ {
     REGISTER_TIMER_INFO("CosBpAtvTimer", getName().c_str());
-    CHECK_EQ(backward_.size(), 1) << "Only one backward function needed";
+    CHECK_EQ(backward_.size(), 1UL) << "Only one backward function needed";
 
     const auto outG = this->getOutputGrad();
     const auto outV = this->getOutputValue();
paddle/py_paddle/util.py
@@ -208,7 +208,7 @@ def __monkeypatch_gradient_machine__():
 
         output = dict()
         for name in layerNames:
-            output[name] = __matrix_to_numpy__(self.getLayerOutput(name))
+            output[name] = __arguments_to_numpy__(0, self.getLayerOutput(name))
         return output
 
     swig_paddle.GradientMachine.getLayerOutputs = getLayerOutputs
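After this update the monkeypatched getLayerOutputs converts each layer's Arguments with __arguments_to_numpy__ instead of __matrix_to_numpy__, so callers receive per-layer dicts rather than raw matrices. A hypothetical call site (gm is a swig_paddle.GradientMachine; the layer name is made up, and the dict layout is inferred from the demo changes in this commit):

    # Query one layer by name after a forward pass.
    outs = gm.getLayerOutputs(["__fc_layer_0__"])
    fc = outs["__fc_layer_0__"]   # a per-layer dict now, not a numpy matrix
    activations = fc["value"]     # numpy array of the layer's output values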