Commit 567871f0
Authored Dec 21, 2016 by dangqingqing

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into batch_norm

Parents: e4c492d3, 4e342208

Showing 9 changed files with 26 additions and 43 deletions (+26 −43)
paddle/gserver/tests/test_ConvTrans.cpp       +6   −6
paddle/gserver/tests/test_ConvUnify.cpp       +6   −23
paddle/parameter/ParameterUpdaterBase.h       +3   −3
paddle/trainer/ParameterUpdater.h             +4   −4
paddle/trainer/RemoteParameterUpdater.cpp     +2   −2
paddle/trainer/RemoteParameterUpdater.h       +2   −2
paddle/trainer/ThreadParameterUpdater.cpp     +1   −1
paddle/trainer/ThreadParameterUpdater.h       +1   −1
paddle/trainer/Trainer.cpp                    +1   −1
paddle/gserver/tests/test_ConvTrans.cpp

@@ -206,7 +206,7 @@ TEST(Layer, convTransLayerFwd2) {
                  /* filter_size */ 5,
                  result);
-  float resultData[] = {1, 2, 2, 2, 1, 2, 4, 4, 4, 2, 2, 4, 4,
+  real resultData[] = {1, 2, 2, 2, 1, 2, 4, 4, 4, 2, 2, 4, 4,
                        4, 2, 2, 4, 4, 4, 2, 1, 2, 2, 2, 1};
   result->setData(resultData);
   doOneConvtTest(/* imgSize */ 5,
@@ -216,7 +216,7 @@ TEST(Layer, convTransLayerFwd2) {
                  /* filter_size */ 4,
                  result);
-  float resultData2[] = {1, 2, 2, 2, 1, 2, 4, 4, 4, 2, 2, 4, 4,
+  real resultData2[] = {1, 2, 2, 2, 1, 2, 4, 4, 4, 2, 2, 4, 4,
                         4, 2, 2, 4, 4, 4, 2, 1, 2, 2, 2, 1};
   result->setData(resultData2);
   doOneConvtTest(/* imgSize */ 5,
@@ -226,7 +226,7 @@ TEST(Layer, convTransLayerFwd2) {
                  /* filter_size */ 5,
                  result);
-  float resultData3[] = {1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 2, 2, 4,
+  real resultData3[] = {1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 2, 2, 4,
                         2, 2, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1};
   result->setData(resultData3);
   doOneConvtTest(/* imgSize */ 5,
paddle/gserver/tests/test_ConvUnify.cpp

@@ -106,8 +106,8 @@ TEST(Layer, convParaUnified) {
 #ifndef PADDLE_ONLY_CPU
   MatrixPtr input, resultCpu, resultGpu;
   input = Matrix::create(1, 4 * 4, false, false);
-  float inputData[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-  float param[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+  real inputData[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+  real param[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 1};
   input->setData(inputData);
@@ -137,26 +137,9 @@ TEST(Layer, convParaUnified) {
   checkMatrixEqual(resultCpu, resultGpu);
   input = Matrix::create(1, 3 * 3 * 2, false, false);
-  float inputData2[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18};
-  float param2[] = {1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1};
+  real inputData2[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18};
+  real param2[] = {1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1};
   input->setData(inputData2);
@@ -185,7 +168,7 @@ TEST(Layer, convParaUnified) {
                             true);
   checkMatrixEqual(resultCpu, resultGpu);
-  float param3[] = {1, 2, 3, 4, 4, 3, 2, 1};
+  real param3[] = {1, 2, 3, 4, 4, 3, 2, 1};
   resultCpu = doOneConvTest(/* imgSize */ 3,
                             /* output_x */ 2,
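Both test files make the same change: hard-coded float arrays become real arrays, so the expected data follows whatever precision the library was built with. The sketch below is a minimal, self-contained illustration of that idea; it assumes the usual PADDLE_TYPE_DOUBLE build switch and is not the project's actual header.

// Sketch only (assumption: this mirrors Paddle's PADDLE_TYPE_DOUBLE switch;
// the actual typedef lives in a Paddle utility header, not here).
#include <cstdio>

#ifdef PADDLE_TYPE_DOUBLE
typedef double real;  // double-precision build
#else
typedef float real;   // default single-precision build
#endif

int main() {
  // The same literal initializer compiles at either precision, which is
  // exactly what the updated tests rely on.
  real resultData[] = {1, 2, 2, 2, 1};
  std::printf("sizeof(real) = %zu, elements = %zu\n",
              sizeof(real), sizeof(resultData) / sizeof(resultData[0]));
  return 0;
}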
paddle/parameter/ParameterUpdaterBase.h

@@ -38,7 +38,7 @@ public:
   virtual void startPass() {}

   // called by Trainer then finishing a pass, ruturn true if pass accepted
-  virtual bool finishPass(real cost = 0) { return true; }
+  virtual bool finishPass() { return true; }

   // called by Trainer before backward() of a batch
   // Return the type of pass it needs. This pass type will be passed
@@ -112,9 +112,9 @@ public:
         [&](int tid, size_t numThreads) { updaters_[tid]->startPass(); });
   }

-  virtual bool finishPass(real cost = 0) {
+  virtual bool finishPass() {
     syncThreadPool_->execPlusOwner(
-        [&](int tid, size_t numThreads) { updaters_[tid]->finishPass(cost); });
+        [&](int tid, size_t numThreads) { updaters_[tid]->finishPass(); });
     return true;
   }
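The remaining files follow from this header change: every override of finishPass drops the real cost parameter, and the Trainer call site stops forwarding the pass cost. A hypothetical, self-contained sketch of what an updater subclass looks like before and after the change (UpdaterBase and MyUpdater are made-up stand-ins, not Paddle classes):

#include <cstdio>

struct UpdaterBase {
  // Old interface shape: virtual bool finishPass(real cost = 0) { return true; }
  virtual bool finishPass() { return true; }  // new shape: no cost argument
  virtual ~UpdaterBase() {}
};

struct MyUpdater : UpdaterBase {
  bool finishPass() override {
    std::printf("pass finished\n");     // per-pass bookkeeping happens here
    return UpdaterBase::finishPass();   // keep the accept/reject contract
  }
};

int main() {
  MyUpdater updater;
  return updater.finishPass() ? 0 : 1;
}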
paddle/trainer/ParameterUpdater.h

@@ -102,9 +102,9 @@ public:
    * @param cost sum cost during one pass.
    * @return true if accept (used for owlqn).
    */
-  virtual bool finishPass(real cost) {
+  virtual bool finishPass() {
     optimizer_->finishPass();
-    return ParameterUpdater::finishPass(cost);
+    return ParameterUpdater::finishPass();
   }

   /**
@@ -220,9 +220,9 @@ public:
     averager_->startPass();
     SgdLocalUpdater::startPass();
   }
-  virtual bool finishPass(real cost) {
+  virtual bool finishPass() {
     averager_->finishPass();
-    return SgdLocalUpdater::finishPass(cost);
+    return SgdLocalUpdater::finishPass();
   }

   /// apply the averaged parameter to PARAMETER_VALUE
paddle/trainer/RemoteParameterUpdater.cpp

@@ -309,7 +309,7 @@ void RemoteParameterUpdater::startPass() {
   }
 }

-bool RemoteParameterUpdater::finishPass(real cost) {
+bool RemoteParameterUpdater::finishPass() {
   if (localUpdater_) {
     localUpdater_->finishPass();
   }
@@ -712,7 +712,7 @@ void SparseRemoteParameterUpdater::startPass() {
   }
 }

-bool SparseRemoteParameterUpdater::finishPass(real cost) {
+bool SparseRemoteParameterUpdater::finishPass() {
   if (config_.algorithm() == TrainAlgorithm::SGD) {
     parameterClient_->waitPassFinish();
   } else {
paddle/trainer/RemoteParameterUpdater.h

@@ -90,7 +90,7 @@ public:
    */
   virtual void finishBatch(real cost);
   virtual void startPass();
-  virtual bool finishPass(real cost);
+  virtual bool finishPass();

 #ifndef PADDLE_DISABLE_TIMER
   virtual void setForwardbackwardTime(uint64_t delta) {
@@ -281,7 +281,7 @@ public:
   /// send all sparse related parameters to all pservers
   virtual void finishBatch(real cost);
   virtual void startPass();
-  virtual bool finishPass(real cost);
+  virtual bool finishPass();
   virtual void apply();
   virtual void restore();
paddle/trainer/ThreadParameterUpdater.cpp

@@ -70,7 +70,7 @@ void SgdThreadUpdater::startPass() {
   }
 }

-bool SgdThreadUpdater::finishPass(real cost) {
+bool SgdThreadUpdater::finishPass() {
   catchUpWith();

   for (auto& para : parameters_) {
paddle/trainer/ThreadParameterUpdater.h

@@ -47,7 +47,7 @@ public:
   virtual void startPass();

   // Use the finishPass() function of the base optimizer.
-  virtual bool finishPass(real cost);
+  virtual bool finishPass();

   virtual void init(const std::vector<ParameterPtr>& parameters);
   virtual PassType startBatch(int64_t batchSize);
paddle/trainer/Trainer.cpp

@@ -537,7 +537,7 @@ void Trainer::trainOnePassBatch(int passId) {
   trainerInternal_.getGradientMachine()->onPassEnd();

-  bool accepted = trainerInternal_.getParameterUpdater()->finishPass(cost);
+  bool accepted = trainerInternal_.getParameterUpdater()->finishPass();

   globalStat.setThreadInfo(true);
   globalStat.printAllStatus();