Commit 56f29658
Authored on Dec 20, 2016 by Yu Yang
Remove not used params in GradientMachine::start
Parent: 2965df51

Showing 12 changed files with 14 additions and 23 deletions.
paddle/gserver/gradientmachines/GradientMachine.h            +1  -5
paddle/gserver/gradientmachines/MultiGradientMachine.cpp     +1  -1
paddle/gserver/gradientmachines/MultiNetwork.cpp             +2  -3
paddle/gserver/gradientmachines/MultiNetwork.h               +1  -1
paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp    +1  -5
paddle/gserver/gradientmachines/ParallelNeuralNetwork.h      +1  -1
paddle/gserver/tests/test_NetworkCompare.cpp                 +1  -1
paddle/gserver/tests/test_RecurrentGradientMachine.cpp       +1  -1
paddle/trainer/Tester.cpp                                    +1  -1
paddle/trainer/Trainer.cpp                                   +2  -2
paddle/trainer/tests/test_Compare.cpp                        +1  -1
paddle/trainer/tests/test_CompareTwoNets.cpp                 +1  -1
paddle/gserver/gradientmachines/GradientMachine.h

@@ -212,11 +212,7 @@ public:
    * @note This function will only been implemented and used in a
    * multithreaded environment.
    */
-  virtual void start(const TrainerConfig& config,
-                     DataProviderPtr dataProvider) {
-    (void)config;
-    (void)dataProvider;
-  }
+  virtual void start() {}

   /**
    * @brief check each work-thread whether is failed/error/finish,
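The rest of the commit is mechanical: every override and every caller drops the two arguments. As a rough illustration of the resulting interface shape, here is a minimal, self-contained sketch; GradientMachineLike and CompositeMachine are hypothetical stand-in types for illustration only, not code from the Paddle tree. A composite machine forwards the parameterless start() to its sub-machines (compare the MultiNetwork::start() loop below), and callers no longer supply a TrainerConfig or DataProviderPtr:

#include <memory>
#include <vector>

// Hypothetical stand-in for the GradientMachine interface after this commit:
// start() takes no parameters and defaults to a no-op, like the new
// virtual void start() {} above.
class GradientMachineLike {
public:
  virtual ~GradientMachineLike() = default;
  virtual void start() {}
  virtual void finish() {}
};

// Hypothetical composite machine: start() is simply forwarded to each
// sub-machine, mirroring the simplified MultiNetwork::start() in this commit.
class CompositeMachine : public GradientMachineLike {
public:
  void add(std::shared_ptr<GradientMachineLike> m) {
    subMachines_.push_back(std::move(m));
  }
  void start() override {
    for (auto& m : subMachines_) {
      m->start();  // no config / data provider is threaded through any more
    }
  }

private:
  std::vector<std::shared_ptr<GradientMachineLike>> subMachines_;
};

int main() {
  CompositeMachine machine;
  machine.add(std::make_shared<GradientMachineLike>());
  machine.start();   // callers previously wrote start(*config_, dataProvider_)
  machine.finish();
  return 0;
}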
paddle/gserver/gradientmachines/MultiGradientMachine.cpp

@@ -441,7 +441,7 @@ TrainerThread::TrainerThread(const ModelConfig& config,
 TrainerThread::~TrainerThread() { stop(); }

 void TrainerThread::start() {
-  gradientMachine_->start(*(TrainerConfig*)nullptr, (DataProviderPtr)nullptr);
+  gradientMachine_->start();

   computeThread_.reset(new std::thread([this]() { computeThread(); }));
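The removed line above is worth a note: to satisfy the unused parameters, the old code materialized a TrainerConfig reference by casting a null pointer. A hedged sketch of why dropping the parameters is more than cosmetic; DummyConfig is a hypothetical stand-in, and the offending dereference is left as a comment because forming a reference through a null pointer is undefined behavior even if the callee never reads it:

struct DummyConfig {};  // hypothetical stand-in for TrainerConfig

void startOld(const DummyConfig& config) { (void)config; }  // old style: ignored param
void startNew() {}                                          // new style: no param at all

int main() {
  // The deleted call was equivalent to:
  //   startOld(*(DummyConfig*)nullptr);
  // which binds a reference from a casted null pointer -- undefined behavior,
  // even though startOld() never touches it. With the parameters gone, the
  // call site needs no such placeholder:
  startNew();
  return 0;
}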
paddle/gserver/gradientmachines/MultiNetwork.cpp

@@ -109,10 +109,9 @@ void MultiNetwork::onPassEnd() {
   }
 }

-void MultiNetwork::start(const TrainerConfig& config,
-                         DataProviderPtr dataProvider) {
+void MultiNetwork::start() {
   for (auto& subNetwork : subNetworks_) {
-    subNetwork->start(config, dataProvider);
+    subNetwork->start();
   }
 }
paddle/gserver/gradientmachines/MultiNetwork.h

@@ -54,7 +54,7 @@ public:
     return subNetworks_;
   }

-  virtual void start(const TrainerConfig& config, DataProviderPtr dataProvider);
+  virtual void start();

   virtual void finish();
paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp

@@ -131,11 +131,7 @@ void ParallelNeuralNetwork::forwardBackward(const std::vector<Argument>& inArgs,
   backward(callback);
 }

-void ParallelNeuralNetwork::start(const TrainerConfig& config,
-                                  DataProviderPtr dataProvider) {
-  (void)config;
-  (void)dataProvider;
-
+void ParallelNeuralNetwork::start() {
   for (auto& thread : threads_) {
     thread->start();
   }
paddle/gserver/gradientmachines/ParallelNeuralNetwork.h

@@ -56,7 +56,7 @@ public:
                                PassType passType,
                                const UpdateCallback& callback = NULL);

-  virtual void start(const TrainerConfig& config, DataProviderPtr dataProvider);
+  virtual void start();

   void addComputeThread(int deviceId);
paddle/gserver/tests/test_NetworkCompare.cpp

@@ -114,7 +114,7 @@ void calcGradient(DataIn& in, DataOut& out, const std::string& configPath) {
       parameters[i]->getBuf(PARAMETER_VALUE)->copyFrom(*in.paraValues[i]);
     }
   }
-  gradientMachine->start(trainer.getConfig(), nullptr);
+  gradientMachine->start();
   gradientMachine->forward(in.inArgs, &outArgs, PASS_TRAIN);
   for (size_t i = 0; i < in.outGrads.size(); i++) {
     // If the all the layers in the config have no parameters, also
paddle/gserver/tests/test_RecurrentGradientMachine.cpp

@@ -28,7 +28,7 @@ class TrainerForTest : public paddle::Trainer {
 public:
   void startTrain() {
     GradientMachine& gm = *this->trainerInternal_.getGradientMachine();
-    gm.start(this->getConfig(), dataProvider_);
+    gm.start();
   }

   void finishTrain() {
paddle/trainer/Tester.cpp

@@ -257,7 +257,7 @@ void Tester::test() {
   CHECK(testDataProvider_) << "TestData is not specified";
   testDataProvider_->setSkipShuffle();
   testDataProvider_->reset();
-  gradientMachine_->start(*config_, testDataProvider_);
+  gradientMachine_->start();

   // For evaluation
   std::vector<std::string> modelList;
paddle/trainer/Trainer.cpp

@@ -308,7 +308,7 @@ static double genPerturbation(real* d, real* grad, size_t dim) {
 }

 real Trainer::checkGradient() {
-  trainerInternal_.getGradientMachine()->start(*config_, dataProvider_);
+  trainerInternal_.getGradientMachine()->start();
   std::vector<ParameterPtr>& parameters =
       trainerInternal_.getGradientMachine()->getNonStaticParameters();
   DataBatch dataBatch;

@@ -390,7 +390,7 @@ void Trainer::startTrain() {
     dataProvider_->reset();
   }

-  trainerInternal_.getGradientMachine()->start(*config_, dataProvider_);
+  trainerInternal_.getGradientMachine()->start();
 }

 void Trainer::finishTrain() { trainerInternal_.getGradientMachine()->finish(); }
paddle/trainer/tests/test_Compare.cpp

@@ -50,7 +50,7 @@ void calcGradient(bool useGpu, comData& Data) {
   trainer.getDataProvider()->getNextBatch(batchSize, &dataBatch);
   CHECK(dataBatch.getSize()) << "No data from data provider";
   vector<Argument>& inArgs = dataBatch.getStreams();
-  trainer.getGradientMachine()->start(trainer.getConfig(), nullptr);
+  trainer.getGradientMachine()->start();
   for (int i = 0; i < 2; ++i) {
     trainer.getGradientMachine()->forwardBackward(
         inArgs, &Data.outArgs, PASS_TRAIN);
paddle/trainer/tests/test_CompareTwoNets.cpp

@@ -72,7 +72,7 @@ void calcGradient(ComData& data, const string configFile) {
   CHECK(dataBatch.getSize()) << "No data from data provider";
   vector<Argument>& inArgs = dataBatch.getStreams();

-  trainer.getGradientMachine()->start(trainer.getConfig(), nullptr);
+  trainer.getGradientMachine()->start();

   trainer.getGradientMachine()->forwardBackward(
       inArgs, &data.outArgs, PASS_TRAIN);