BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit b063b0c4
Authored on Dec 21, 2016 by Peng Li

Merge branch 'develop' into fix-crf-weight-and-coeff-bug

Parents: af820fb2, 28c5010c
Showing 25 changed files with 110 additions and 78 deletions (+110 −78)
.travis.yml  +1 −1
paddle/gserver/gradientmachines/GradientMachine.h  +1 −5
paddle/gserver/gradientmachines/MultiGradientMachine.cpp  +1 −1
paddle/gserver/gradientmachines/MultiNetwork.cpp  +2 −3
paddle/gserver/gradientmachines/MultiNetwork.h  +1 −1
paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp  +1 −5
paddle/gserver/gradientmachines/ParallelNeuralNetwork.h  +1 −1
paddle/gserver/tests/LayerGradUtil.cpp  +31 −5
paddle/gserver/tests/LayerGradUtil.h  +20 −0
paddle/gserver/tests/test_ConvTrans.cpp  +6 −6
paddle/gserver/tests/test_ConvUnify.cpp  +6 −23
paddle/gserver/tests/test_NetworkCompare.cpp  +1 −1
paddle/gserver/tests/test_RecurrentGradientMachine.cpp  +1 −1
paddle/parameter/ParameterUpdaterBase.h  +3 −3
paddle/scripts/travis/docs.sh  +6 −2
paddle/trainer/ParameterUpdater.h  +4 −4
paddle/trainer/RemoteParameterUpdater.cpp  +2 −2
paddle/trainer/RemoteParameterUpdater.h  +2 −2
paddle/trainer/Tester.cpp  +1 −1
paddle/trainer/ThreadParameterUpdater.cpp  +1 −1
paddle/trainer/ThreadParameterUpdater.h  +1 −1
paddle/trainer/Trainer.cpp  +3 −3
paddle/trainer/tests/test_Compare.cpp  +1 −1
paddle/trainer/tests/test_CompareTwoNets.cpp  +1 −1
python/paddle/trainer/config_parser.py  +12 −4
.travis.yml

@@ -56,7 +56,7 @@ before_install:
   - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi
   - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi
   - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
-  - pip install wheel protobuf sphinx recommonmark virtualenv numpy sphinx_rtd_theme pre-commit
+  - pip install wheel protobuf sphinx recommonmark virtualenv numpy sphinx_rtd_theme pre-commit requests==2.9.2 LinkChecker
 script:
   - paddle/scripts/travis/main.sh
 notifications:
paddle/gserver/gradientmachines/GradientMachine.h

@@ -212,11 +212,7 @@ public:
    * @note This function will only been implemented and used in a
    *       multithreaded environment.
    */
-  virtual void start(const TrainerConfig& config, DataProviderPtr dataProvider) {
-    (void)config;
-    (void)dataProvider;
-  }
+  virtual void start() {}

   /**
    * @brief check each work-thread whether is failed/error/finish,
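This commit drops the unused TrainerConfig/DataProviderPtr parameters from GradientMachine::start(); the files below update every override and call site to match. A minimal stand-alone sketch of the migration pattern follows; these classes are illustrative stand-ins, not Paddle's real headers:

#include <iostream>

// Stand-in mirroring the before/after shape of the change.
struct GradientMachine {
  // Before: virtual void start(const TrainerConfig& config, DataProviderPtr data)
  //         { (void)config; (void)data; }   // both arguments were ignored
  virtual void start() {}  // after: the unused parameters are gone
  virtual ~GradientMachine() = default;
};

struct MultiNetwork : GradientMachine {
  void start() override { std::cout << "sub-networks started\n"; }
};

int main() {
  MultiNetwork net;
  // Call sites shrink the same way.
  // Before: net.start(*config, dataProvider);
  net.start();  // after
  return 0;
}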
paddle/gserver/gradientmachines/MultiGradientMachine.cpp

@@ -441,7 +441,7 @@ TrainerThread::TrainerThread(const ModelConfig& config,
 TrainerThread::~TrainerThread() { stop(); }

 void TrainerThread::start() {
-  gradientMachine_->start(*(TrainerConfig*)nullptr, (DataProviderPtr)nullptr);
+  gradientMachine_->start();

   computeThread_.reset(new std::thread([this]() { computeThread(); }));
paddle/gserver/gradientmachines/MultiNetwork.cpp

@@ -109,10 +109,9 @@ void MultiNetwork::onPassEnd() {
   }
 }

-void MultiNetwork::start(const TrainerConfig& config,
-                         DataProviderPtr dataProvider) {
+void MultiNetwork::start() {
   for (auto& subNetwork : subNetworks_) {
-    subNetwork->start(config, dataProvider);
+    subNetwork->start();
   }
 }
paddle/gserver/gradientmachines/MultiNetwork.h

@@ -54,7 +54,7 @@ public:
     return subNetworks_;
   }

-  virtual void start(const TrainerConfig& config, DataProviderPtr dataProvider);
+  virtual void start();

   virtual void finish();
paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp

@@ -131,11 +131,7 @@ void ParallelNeuralNetwork::forwardBackward(const std::vector<Argument>& inArgs,
   backward(callback);
 }

-void ParallelNeuralNetwork::start(const TrainerConfig& config,
-                                  DataProviderPtr dataProvider) {
-  (void)config;
-  (void)dataProvider;
-
+void ParallelNeuralNetwork::start() {
   for (auto& thread : threads_) {
     thread->start();
   }
paddle/gserver/gradientmachines/ParallelNeuralNetwork.h

@@ -56,7 +56,7 @@ public:
                        PassType passType,
                        const UpdateCallback& callback = NULL);

-  virtual void start(const TrainerConfig& config, DataProviderPtr dataProvider);
+  virtual void start();

   void addComputeThread(int deviceId);
paddle/gserver/tests/LayerGradUtil.cpp

@@ -303,13 +303,31 @@ void initDataLayer(TestConfig testConf,
   ICpuGpuVectorPtr sequenceStartPositions;
   ICpuGpuVectorPtr subSequenceStartPositions;
   IVectorPtr cpuSequenceDims;
-  for (size_t i = 0; i < testConf.inputDefs.size(); i++) {
+  for (size_t i = 0; i < testConf.inputDefs.size(); ++i) {
+    if (testConf.inputDefs[i].inputType != INPUT_SEQUENCE_LABEL) continue;
+
+    const std::vector<int>& labelSeqStartPositions =
+        testConf.inputDefs[i].labelSeqStartPositions;
+    if (labelSeqStartPositions.size() != 0) {
+      CHECK(!sequenceStartPositions);
+      CHECK_GE(labelSeqStartPositions.size(), 2);
+
+      sequenceStartPositions =
+          ICpuGpuVector::create(labelSeqStartPositions.size(), useGpu);
+      sequenceStartPositions->copyFrom(labelSeqStartPositions.data(),
+                                       labelSeqStartPositions.size(),
+                                       useGpu);
+    }
+  }
+
+  for (size_t i = 0; i < testConf.inputDefs.size(); ++i) {
     LayerConfig config;
     config.set_name(testConf.inputDefs[i].name);
     config.set_type("data");
     config.set_size(testConf.inputDefs[i].dim);
     LayerPtr layer = LayerPtr(new DataLayer(config));
-    size_t numSequence = batchSize / 10 + 1;
+    size_t numSequence = sequenceStartPositions
+                             ? sequenceStartPositions->getSize() - 1
+                             : batchSize / 10 + 1;

     Argument data;
     auto fillData = [&](bool trans, int height, int width) {

@@ -336,9 +354,17 @@ void initDataLayer(TestConfig testConf,
       break;
     case INPUT_LABEL:
     case INPUT_SEQUENCE_LABEL:
-      data.ids = VectorT<int>::create(batchSize, useGpu);
-      // now rand number can be 0 to inputDefs[i].dim
-      data.ids->rand(testConf.inputDefs[i].dim);
+      if (testConf.inputDefs[i].labelInitValue.size() != 0) {
+        const std::vector<int>& labelInitValue =
+            testConf.inputDefs[i].labelInitValue;
+        CHECK_EQ(labelInitValue.size(), batchSize);
+        data.ids = VectorT<int>::create(batchSize, useGpu);
+        data.ids->copyFrom(labelInitValue.data(), batchSize);
+      } else {
+        data.ids = VectorT<int>::create(batchSize, useGpu);
+        // now rand number can be 0 to inputDefs[i].dim
+        data.ids->rand(testConf.inputDefs[i].dim);
+      }
       break;
     case INPUT_SPARSE_NON_VALUE_DATA:
       data.value = makeRandomSparseMatrix(
paddle/gserver/tests/LayerGradUtil.h

@@ -64,6 +64,9 @@ struct InputDef {
   size_t paraSize;
   ParaSparse sparse;
   bool isStatic;
+  std::vector<int> labelInitValue;
+  std::vector<int> labelSeqStartPositions;
+
   InputDef(InputType type, string nameIn, size_t dimIn, size_t sizeIn) {
     inputType = type;
     name = nameIn;

@@ -72,6 +75,23 @@ struct InputDef {
     sparse = {""};
     isStatic = false;
   }
+
+  InputDef(InputType type,
+           string nameIn,
+           size_t dimIn,
+           size_t sizeIn,
+           const std::vector<int>& labelInitValue,
+           const std::vector<int>& labelSeqStartPositions)
+      : labelInitValue(labelInitValue),
+        labelSeqStartPositions(labelSeqStartPositions) {
+    inputType = type;
+    name = nameIn;
+    dim = dimIn;
+    paraSize = sizeIn;
+    sparse = {""};
+    isStatic = false;
+  }
+
   InputDef(InputType type,
            string nameIn,
            size_t dimIn,
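The new InputDef constructor above lets a test supply fixed label values and explicit sequence boundaries instead of the random labels initDataLayer used to generate. A self-contained sketch of the usage pattern, with a stand-in InputDef rather than Paddle's real header:

#include <string>
#include <vector>

// Stand-in mirroring the new constructor from the diff above.
enum InputType { INPUT_SEQUENCE_LABEL };

struct InputDef {
  InputType inputType;
  std::string name;
  size_t dim, paraSize;
  std::vector<int> labelInitValue;
  std::vector<int> labelSeqStartPositions;

  InputDef(InputType type, std::string nameIn, size_t dimIn, size_t sizeIn,
           const std::vector<int>& initValue, const std::vector<int>& seqStarts)
      : inputType(type), name(nameIn), dim(dimIn), paraSize(sizeIn),
        labelInitValue(initValue), labelSeqStartPositions(seqStarts) {}
};

int main() {
  // Two sequences, [0, 2) and [2, 4), with fixed label ids 0..3, so the
  // test's expected output is deterministic.
  InputDef label(INPUT_SEQUENCE_LABEL, "label", /* dim */ 4, /* paraSize */ 0,
                 {0, 1, 2, 3}, {0, 2, 4});
  return label.labelSeqStartPositions.size() == 3 ? 0 : 1;
}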
paddle/gserver/tests/test_ConvTrans.cpp

@@ -206,8 +206,8 @@ TEST(Layer, convTransLayerFwd2) {
                 /* filter_size */ 5,
                 result);

-  float resultData[] = {1, 2, 2, 2, 1, 2, 4, 4, 4, 2, 2, 4, 4, 4, 2,
-                        2, 4, 4, 4, 2, 1, 2, 2, 2, 1};
+  real resultData[] = {1, 2, 2, 2, 1, 2, 4, 4, 4, 2, 2, 4, 4, 4, 2,
+                       2, 4, 4, 4, 2, 1, 2, 2, 2, 1};
   result->setData(resultData);
   doOneConvtTest(/* imgSize */ 5,
                  /* output_x */ 2,

@@ -216,8 +216,8 @@ TEST(Layer, convTransLayerFwd2) {
                 /* filter_size */ 4,
                 result);

-  float resultData2[] = {1, 2, 2, 2, 1, 2, 4, 4, 4, 2, 2, 4, 4, 4, 2,
-                         2, 4, 4, 4, 2, 1, 2, 2, 2, 1};
+  real resultData2[] = {1, 2, 2, 2, 1, 2, 4, 4, 4, 2, 2, 4, 4, 4, 2,
+                        2, 4, 4, 4, 2, 1, 2, 2, 2, 1};
   result->setData(resultData2);
   doOneConvtTest(/* imgSize */ 5,
                  /* output_x */ 2,

@@ -226,8 +226,8 @@ TEST(Layer, convTransLayerFwd2) {
                 /* filter_size */ 5,
                 result);

-  float resultData3[] = {1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 2, 2, 4, 2, 2,
-                         1, 1, 2, 1, 1, 1, 1, 2, 1, 1};
+  real resultData3[] = {1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 2, 2, 4, 2, 2,
+                        1, 1, 2, 1, 1, 1, 1, 2, 1, 1};
   result->setData(resultData3);
   doOneConvtTest(/* imgSize */ 5,
                  /* output_x */ 2,
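The float-to-real renames in this test and the next keep the expected-value arrays in whatever precision Paddle was compiled with. A sketch of the idea, assuming a compile-time switch along the lines of PADDLE_TYPE_DOUBLE (the macro name is an assumption for illustration, not taken from this diff):

// `real` as a build-time precision alias; macro name assumed.
#ifndef PADDLE_TYPE_DOUBLE
typedef float real;
#else
typedef double real;
#endif

int main() {
  // Declared as real, the fixture compiles under either precision and
  // matches an interface such as setData(real*) without a cast.
  real resultData[] = {1, 2, 2, 2, 1};
  return sizeof(resultData) > 0 ? 0 : 1;
}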
paddle/gserver/tests/test_ConvUnify.cpp

@@ -106,8 +106,8 @@ TEST(Layer, convParaUnified) {
 #ifndef PADDLE_ONLY_CPU
   MatrixPtr input, resultCpu, resultGpu;
   input = Matrix::create(1, 4 * 4, false, false);
-  float inputData[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-  float param[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 1};
+  real inputData[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+  real param[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 1};

   input->setData(inputData);

@@ -137,26 +137,9 @@ TEST(Layer, convParaUnified) {
   checkMatrixEqual(resultCpu, resultGpu);

   input = Matrix::create(1, 3 * 3 * 2, false, false);
-  float inputData2[] = {1, 2, 3, 4, 5, 6, 7, 8, 9,
-                        10, 11, 12, 13, 14, 15, 16, 17, 18};
-  float param2[] = {1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1};
+  real inputData2[] = {1, 2, 3, 4, 5, 6, 7, 8, 9,
+                       10, 11, 12, 13, 14, 15, 16, 17, 18};
+  real param2[] = {1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1};

   input->setData(inputData2);

@@ -185,7 +168,7 @@ TEST(Layer, convParaUnified) {
                             true);
   checkMatrixEqual(resultCpu, resultGpu);

-  float param3[] = {1, 2, 3, 4, 4, 3, 2, 1};
+  real param3[] = {1, 2, 3, 4, 4, 3, 2, 1};

   resultCpu = doOneConvTest(/* imgSize */ 3,
                             /* output_x */ 2,
paddle/gserver/tests/test_NetworkCompare.cpp

@@ -114,7 +114,7 @@ void calcGradient(DataIn& in, DataOut& out, const std::string& configPath) {
       parameters[i]->getBuf(PARAMETER_VALUE)->copyFrom(*in.paraValues[i]);
     }
   }
-  gradientMachine->start(trainer.getConfig(), nullptr);
+  gradientMachine->start();
   gradientMachine->forward(in.inArgs, &outArgs, PASS_TRAIN);
   for (size_t i = 0; i < in.outGrads.size(); i++) {
     // If the all the layers in the config have no parameters, also
paddle/gserver/tests/test_RecurrentGradientMachine.cpp

@@ -28,7 +28,7 @@ class TrainerForTest : public paddle::Trainer {
 public:
   void startTrain() {
     GradientMachine& gm = *this->trainerInternal_.getGradientMachine();
-    gm.start(this->getConfig(), dataProvider_);
+    gm.start();
   }

   void finishTrain() {
paddle/parameter/ParameterUpdaterBase.h

@@ -38,7 +38,7 @@ public:
   virtual void startPass() {}

   // called by Trainer then finishing a pass, ruturn true if pass accepted
-  virtual bool finishPass(real cost = 0) { return true; }
+  virtual bool finishPass() { return true; }

   // called by Trainer before backward() of a batch
   // Return the type of pass it needs. This pass type will be passed

@@ -112,9 +112,9 @@ public:
         [&](int tid, size_t numThreads) { updaters_[tid]->startPass(); });
   }

-  virtual bool finishPass(real cost = 0) {
+  virtual bool finishPass() {
     syncThreadPool_->execPlusOwner(
-        [&](int tid, size_t numThreads) { updaters_[tid]->finishPass(cost); });
+        [&](int tid, size_t numThreads) { updaters_[tid]->finishPass(); });
     return true;
   }
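The same treatment is applied to finishPass(): the cost parameter is removed from the base class here, from every override in the trainer files below, and from the call in Trainer::trainOnePassBatch. A minimal stand-alone sketch of the before/after, using stand-in classes rather than Paddle's real updater hierarchy:

// Stand-in sketch of the finishPass() signature change.
struct ParameterUpdater {
  // Before: virtual bool finishPass(real cost = 0) { return true; }
  virtual bool finishPass() { return true; }  // after: cost dropped
  virtual ~ParameterUpdater() = default;
};

struct SgdThreadUpdater : ParameterUpdater {
  bool finishPass() override {
    // Before, overrides forwarded the cost argument:
    //   return ParameterUpdater::finishPass(cost);
    return ParameterUpdater::finishPass();  // after
  }
};

int main() {
  SgdThreadUpdater updater;
  return updater.finishPass() ? 0 : 1;
}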
paddle/scripts/travis/docs.sh

@@ -7,6 +7,10 @@ source ./common.sh
 cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=ON
 make paddle_docs paddle_docs_cn

+# check websites for broken links
+linkchecker doc/en/html/index.html
+linkchecker doc/cn/html/index.html
+
 # Parse Github URL
 REPO=`git config remote.origin.url`
 SSH_REPO=${REPO/https:\/\/github.com\//git@github.com:}

@@ -35,8 +39,8 @@ git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH
 # remove old docs. mv new docs.
 rm -rf doc doc_cn
-mv ../doc_cn/html doc_cn
-mv ../doc/html doc
+mv ../doc/cn/html doc_cn
+mv ../doc/en/html doc

 # Check is there anything changed.
 set +e
paddle/trainer/ParameterUpdater.h

@@ -102,9 +102,9 @@ public:
    * @param cost sum cost during one pass.
    * @return true if accept (used for owlqn).
    */
-  virtual bool finishPass(real cost) {
+  virtual bool finishPass() {
     optimizer_->finishPass();
-    return ParameterUpdater::finishPass(cost);
+    return ParameterUpdater::finishPass();
   }

   /**

@@ -220,9 +220,9 @@ public:
     averager_->startPass();
     SgdLocalUpdater::startPass();
   }
-  virtual bool finishPass(real cost) {
+  virtual bool finishPass() {
     averager_->finishPass();
-    return SgdLocalUpdater::finishPass(cost);
+    return SgdLocalUpdater::finishPass();
   }

   /// apply the averaged parameter to PARAMETER_VALUE
paddle/trainer/RemoteParameterUpdater.cpp

@@ -309,7 +309,7 @@ void RemoteParameterUpdater::startPass() {
   }
 }

-bool RemoteParameterUpdater::finishPass(real cost) {
+bool RemoteParameterUpdater::finishPass() {
   if (localUpdater_) {
     localUpdater_->finishPass();
   }

@@ -712,7 +712,7 @@ void SparseRemoteParameterUpdater::startPass() {
   }
 }

-bool SparseRemoteParameterUpdater::finishPass(real cost) {
+bool SparseRemoteParameterUpdater::finishPass() {
   if (config_.algorithm() == TrainAlgorithm::SGD) {
     parameterClient_->waitPassFinish();
   } else {
paddle/trainer/RemoteParameterUpdater.h

@@ -90,7 +90,7 @@ public:
    */
   virtual void finishBatch(real cost);
   virtual void startPass();
-  virtual bool finishPass(real cost);
+  virtual bool finishPass();

 #ifndef PADDLE_DISABLE_TIMER
   virtual void setForwardbackwardTime(uint64_t delta) {

@@ -281,7 +281,7 @@ public:
   /// send all sparse related parameters to all pservers
   virtual void finishBatch(real cost);
   virtual void startPass();
-  virtual bool finishPass(real cost);
+  virtual bool finishPass();
   virtual void apply();
   virtual void restore();
paddle/trainer/Tester.cpp

@@ -257,7 +257,7 @@ void Tester::test() {
   CHECK(testDataProvider_) << "TestData is not specified";
   testDataProvider_->setSkipShuffle();
   testDataProvider_->reset();
-  gradientMachine_->start(*config_, testDataProvider_);
+  gradientMachine_->start();

   // For evaluation
   std::vector<std::string> modelList;
paddle/trainer/ThreadParameterUpdater.cpp

@@ -70,7 +70,7 @@ void SgdThreadUpdater::startPass() {
   }
 }

-bool SgdThreadUpdater::finishPass(real cost) {
+bool SgdThreadUpdater::finishPass() {
   catchUpWith();

   for (auto& para : parameters_) {
paddle/trainer/ThreadParameterUpdater.h

@@ -47,7 +47,7 @@ public:
   virtual void startPass();

   // Use the finishPass() function of the base optimizer.
-  virtual bool finishPass(real cost);
+  virtual bool finishPass();

   virtual void init(const std::vector<ParameterPtr>& parameters);
   virtual PassType startBatch(int64_t batchSize);
paddle/trainer/Trainer.cpp

@@ -308,7 +308,7 @@ static double genPerturbation(real* d, real* grad, size_t dim) {
 }

 real Trainer::checkGradient() {
-  trainerInternal_.getGradientMachine()->start(*config_, dataProvider_);
+  trainerInternal_.getGradientMachine()->start();
   std::vector<ParameterPtr>& parameters =
       trainerInternal_.getGradientMachine()->getNonStaticParameters();
   DataBatch dataBatch;

@@ -390,7 +390,7 @@ void Trainer::startTrain() {
     dataProvider_->reset();
   }

-  trainerInternal_.getGradientMachine()->start(*config_, dataProvider_);
+  trainerInternal_.getGradientMachine()->start();
 }

 void Trainer::finishTrain() { trainerInternal_.getGradientMachine()->finish(); }

@@ -537,7 +537,7 @@ void Trainer::trainOnePassBatch(int passId) {
   trainerInternal_.getGradientMachine()->onPassEnd();

-  bool accepted = trainerInternal_.getParameterUpdater()->finishPass(cost);
+  bool accepted = trainerInternal_.getParameterUpdater()->finishPass();

   globalStat.setThreadInfo(true);
   globalStat.printAllStatus();
paddle/trainer/tests/test_Compare.cpp

@@ -50,7 +50,7 @@ void calcGradient(bool useGpu, comData& Data) {
   trainer.getDataProvider()->getNextBatch(batchSize, &dataBatch);
   CHECK(dataBatch.getSize()) << "No data from data provider";
   vector<Argument>& inArgs = dataBatch.getStreams();
-  trainer.getGradientMachine()->start(trainer.getConfig(), nullptr);
+  trainer.getGradientMachine()->start();
   for (int i = 0; i < 2; ++i) {
     trainer.getGradientMachine()->forwardBackward(
         inArgs, &Data.outArgs, PASS_TRAIN);
paddle/trainer/tests/test_CompareTwoNets.cpp

@@ -72,7 +72,7 @@ void calcGradient(ComData& data, const string configFile) {
   CHECK(dataBatch.getSize()) << "No data from data provider";
   vector<Argument>& inArgs = dataBatch.getStreams();

-  trainer.getGradientMachine()->start(trainer.getConfig(), nullptr);
+  trainer.getGradientMachine()->start();

   trainer.getGradientMachine()->forwardBackward(
       inArgs, &data.outArgs, PASS_TRAIN);
python/paddle/trainer/config_parser.py

@@ -498,9 +498,16 @@ class Input(Cfg):
             is_static=None,
             is_shared=None,
             update_hooks=None,
-            input_layer_argument=None,
-            ):
+            input_layer_argument=None,
+            make_layer_name_in_submodel=True,
+            ):
+        """
+        @param make_layer_name_in_submodel True by defalut, you might need to
+        set it carefully when adding Input in config_parser.py.
+        """
         self.add_keys(locals())
-        self.input_layer_name = MakeLayerNameInSubmodel(input_layer_name)
+        self.input_layer_name = MakeLayerNameInSubmodel(
+            input_layer_name) if make_layer_name_in_submodel else input_layer_name
 # Define a projection for iexed layer

@@ -1848,7 +1855,8 @@ class BatchNormLayer(LayerBase):
                 initial_std=0.0,
                 initial_mean=0.0,
                 is_static=True,
-                is_shared=is_shared,
-            ))
+                is_shared=is_shared,
+                make_layer_name_in_submodel=False,
+            ))

         parallel_nn = bool(int(g_command_config_args.get("parallel_nn", 0)))
         cudnn_version = int(g_command_config_args.get("cudnn_version", 0))

@@ -1880,7 +1888,7 @@ class BatchNormLayer(LayerBase):
         # when either of it is non-zero.
         if input_layer.width != 0 or input_layer.height != 0:
             self.set_cnn_layer(name, image_conf.img_size_y, image_conf.img_size,
-                               image_conf.channels, True)
+                               image_conf.channels, False)
         else:
             self.set_layer_size(input_layer.size)