机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 65d9e33b
Authored June 19, 2017 by dzhwinter
Parent: df5bc787

"modify config name"

Showing 7 changed files with 28 additions and 29 deletions (+28 -29)
paddle/math/tests/CMakeLists.txt              +0 -1
paddle/optimizer/adam_optimizer.cc            +2 -1
paddle/optimizer/lr_policy.h                  +9 -4
paddle/optimizer/parameter_optimizer.cc       +3 -3
paddle/optimizer/parameter_optimizer_test.cpp +7 -9
paddle/optimizer/sgd_optimizer.cc             +1 -5
proto/OptimizerConfig.proto                   +6 -6
paddle/math/tests/CMakeLists.txt

@@ -31,4 +31,3 @@ add_simple_unittest(test_FPException)
 add_simple_unittest(test_GpuProfiler)
 add_simple_unittest(test_BaseMatrix)
 add_simple_unittest(test_Matrix)
-add_simple_unittest(test_Matrix2)
paddle/optimizer/adam_optimizer.cc

@@ -28,7 +28,8 @@ const char *AdamOptimizer::SerializeState(int *state_len) {
   state.set_num_sample_passed(num_sample_passed_);
   TensorToProto(*parameter_, state.mutable_parameter());
-  TensorToProto(*velocitys_, state.mutable_momentums());
+  TensorToProto(*momentums_, state.mutable_momentums());
+  TensorToProto(*velocitys_, state.mutable_velocitys());
   auto str = state.SerializeAsString();
   *state_len = str.size();
   return str.c_str();
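The removed line wrote the velocity tensor into the proto's momentums field, so the Adam momentums were never serialized at all; the fix stores each tensor in its matching field. The read path is not part of this commit, but a mirror-image sketch (assuming the state message is AdamOptimizerState with the usual generated accessors for the fields written above, and using ProtoToTensor as seen in sgd_optimizer.cc below) would be:

    // Hypothetical counterpart to the corrected SerializeState; not from this diff.
    void AdamOptimizer::DeserializeState(const std::string &str) {
      AdamOptimizerState state;
      state.ParseFromString(str);
      num_sample_passed_ = state.num_sample_passed();
      ProtoToTensor(state.parameter(), parameter_);  // each proto field back into
      ProtoToTensor(state.momentums(), momentums_);  // its own tensor, matching
      ProtoToTensor(state.velocitys(), velocitys_);  // the write path above
    }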
paddle/optimizer/lr_policy.h

@@ -21,8 +21,8 @@ public:
   double LearningRate(const uint64_t num_sample_passed) {
     return learning_rate;
   }
-  const char *SerializeState(int *state_len);
-  void DeserializeState(const std::string &state);
+  const char *SerializeState(int *state_len) { return nullptr; }
+  void DeserializeState(const std::string &state) {}

 private:
   double learning_rate;

@@ -35,8 +35,13 @@ public:
   double LearningRate(const uint64_t num_sample_passed) {
     return std::max(learning_rate - lr_decay_a * num_sample_passed, lr_decay_b);
   }
-  const char *SerializeState(int *state_len);
-  void DeserializeState(const std::string &state);
+  const char *SerializeState(int *state_len) {
+    // TODO(zhihong) : add lr_policy serialization
+    return nullptr;
+  }
+  void DeserializeState(const std::string &state) {
+    // TODO(zhihong) : add lr_policy serialization
+  }

 private:
   double learning_rate;
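The second hunk belongs to the linear-decay policy: the rate drops by lr_decay_a per sample seen and is clamped below by lr_decay_b. A standalone sketch of the same formula, with illustrative constants that are not taken from the diff:

    #include <algorithm>
    #include <cstdint>

    // Same computation as LinearLr::LearningRate above.
    double linear_lr(double learning_rate, double lr_decay_a, double lr_decay_b,
                     uint64_t num_sample_passed) {
      return std::max(learning_rate - lr_decay_a * num_sample_passed, lr_decay_b);
    }

    // linear_lr(0.1, 1e-5, 0.01, 5000)  -> 0.05  (still decaying)
    // linear_lr(0.1, 1e-5, 0.01, 20000) -> 0.01  (clamped at lr_decay_b)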
paddle/optimizer/parameter_optimizer.cc

@@ -13,13 +13,13 @@ namespace optimizer {
 ParameterOptimizer *ParameterOptimizer::Create(const std::string &config_proto,
                                                Tensor *parameter) {
   paddle::OptimizerConfig config;
-  CHECK(config.ParseFromString(config_proto) == 0)
+  CHECK(config.ParseFromString(config_proto) == true)
       << "failed parse optimizer config";
   auto select_lr_policy = [=](const OptimizerConfig &config) -> LrPolicy * {
-    if (config.lr_policy() == OptimizerConfig::ConstLr)
+    if (config.lr_policy() == OptimizerConfig::Const)
       return new ConstLr(config.const_lr().learning_rate());
-    if (config.lr_policy() == OptimizerConfig::LinearLr)
+    if (config.lr_policy() == OptimizerConfig::Linear)
       return new LinearLr(config.linear_lr().learning_rate(),
                           config.linear_lr().lr_decay_a(),
                           config.linear_lr().lr_decay_b());
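Beyond the rename, this hunk fixes a real bug: protobuf's ParseFromString returns bool, true on success, so `== 0` made the CHECK abort precisely when parsing succeeded. A minimal standalone illustration of the corrected guard, using glog's CHECK as the Paddle sources do (ParseConfigOrDie is a hypothetical helper name):

    #include <string>
    #include <glog/logging.h>
    #include "OptimizerConfig.pb.h"  // generated from proto/OptimizerConfig.proto

    paddle::OptimizerConfig ParseConfigOrDie(const std::string &config_proto) {
      paddle::OptimizerConfig config;
      // ParseFromString returns true on success; the old `== 0` comparison
      // inverted the check, failing on good input and passing on bad input.
      CHECK(config.ParseFromString(config_proto) == true)
          << "failed parse optimizer config";
      return config;
    }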
paddle/optimizer/parameter_optimizer_test.cpp

@@ -2,11 +2,8 @@
 #include <cmath>
 #include <map>
 #include <vector>
-#include "adadelta_optimizer.h"
-#include "adagrad_optimizer.h"
-#include "adam_optimizer.h"
 #include "gtest/gtest.h"
-#include "sgd_optimizer.h"
+#include "lr_policy.h"

 using namespace paddle;
 using namespace paddle::optimizer;

@@ -41,12 +38,12 @@ public:
   virtual void TearDown() {}

   void CreateSGD() {
-    Tensor *parameter = FillTensor(kSize);
+    Tensor *parameter = FixedTensor(kSize);
     config_.set_optimizer(OptimizerConfig::SGD);
     config_.mutable_sgd()->set_momentum(0.0);
     config_.mutable_sgd()->set_decay(0.0);
     config_.mutable_sgd()->set_nesterov(false);
-    config_.set_lr_policy(OptimizerConfig::ConstLr);
+    config_.set_lr_policy(OptimizerConfig::Const);
     config_.mutable_const_lr()->set_learning_rate(0.1);
     std::string str = config_.SerializeAsString();

@@ -62,7 +59,7 @@ public:
     config_.mutable_adam()->set_beta_2(0.1);
     config_.mutable_adam()->set_epsilon(1e-3);
     config_.mutable_adam()->set_decay(0.0);
-    config_.set_lr_policy(OptimizerConfig::ConstLr);
+    config_.set_lr_policy(OptimizerConfig::Const);
     config_.mutable_const_lr()->set_learning_rate(0.1);
     std::string str = config_.SerializeAsString();
     ParameterOptimizer *opt = ParameterOptimizer::Create(str, parameter);

@@ -90,12 +87,13 @@ public:
   void TestCheckPoint() {
     std::map<OptimizerConfig::Optimizer, int> expected_state_len = {
-        {OptimizerConfig::SGD, kSize}, {OptimizerConfig::Adam, kSize * 3},
+        {OptimizerConfig::SGD, kSize * sizeof(float) + sizeof(double)},
+        {OptimizerConfig::Adam, kSize * 3 * sizeof(float) + sizeof(double)},
     };
     for (size_t i = 0; i < opts_.size(); ++i) {
       int state_len = 0;
       std::string state = opts_[i]->SerializeState(&state_len);
-      EXPECT_EQ(state_len, expected_state_len[opts_table_[i]]);
+      EXPECT_EQ(state_len, expected_state_len[opts_table_[i + 1]]);
       opts_[i]->DeserializeState(state);
     }
   }
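The checkpoint expectations now spell out the byte widths: a serialized state carries its tensor payload as floats plus a single double for the sample counter. A quick arithmetic check with an assumed kSize of 5 (the test's real kSize is defined outside the hunks shown):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t kSize = 5;  // assumed value, for illustration only
      // SGD state: one tensor (the parameter) plus num_sample_passed.
      std::printf("SGD : %zu bytes\n", kSize * sizeof(float) + sizeof(double));      // 28
      // Adam state: three tensors (parameter, momentums, velocitys).
      std::printf("Adam: %zu bytes\n", kSize * 3 * sizeof(float) + sizeof(double));  // 68
      return 0;
    }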
paddle/optimizer/sgd_optimizer.cc

@@ -29,11 +29,9 @@ void SGDOptimizer::Update(const Tensor *gradient) {
 const char *SGDOptimizer::SerializeState(int *state_len) {
   SGDOptimizerState state;
-  // TODO(zhihong) : add lr_policy serialization
   state.set_num_sample_passed(num_sample_passed_);
   TensorToProto(*parameter_, state.mutable_parameter());
-  TensorToProto(*momentums_, state.mutable_momentums());
+  if (momentum_ != 0.0) TensorToProto(*momentums_, state.mutable_momentums());
   auto str = state.SerializeAsString();
   *state_len = str.size();
   return str.c_str();

@@ -42,9 +40,7 @@ const char *SGDOptimizer::SerializeState(int *state_len) {
 void SGDOptimizer::DeserializeState(const std::string &str) {
   SGDOptimizerState state;
   state.ParseFromString(str);
-  // TODO(zhihong) : add lr_policy DeserializeState
   num_sample_passed_ = state.num_sample_passed();
   ProtoToTensor(state.parameter(), parameter_);
-  ProtoToTensor(state.parameter(), momentums_);
 }
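Momentum state is now written only when momentum is actually enabled, and the old restore (which copied the parameter field into the momentum tensor) is dropped; this is what makes the plain-SGD state exactly kSize * sizeof(float) + sizeof(double) in the updated test. If a momentum restore were added back, a guarded sketch might look like this (assuming momentums is an optional field of SGDOptimizerState, so the generated has_momentums() presence check exists):

    // Hypothetical guarded restore; not part of this commit.
    void SGDOptimizer::DeserializeState(const std::string &str) {
      SGDOptimizerState state;
      state.ParseFromString(str);
      num_sample_passed_ = state.num_sample_passed();
      ProtoToTensor(state.parameter(), parameter_);
      // Restore momentums only if they were serialized (momentum_ != 0.0).
      if (state.has_momentums()) ProtoToTensor(state.momentums(), momentums_);
    }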
proto/OptimizerConfig.proto

@@ -53,12 +53,12 @@ message AdamConfig {
   optional double decay = 44;
 }

-message ConstLr {
+message ConstLrConfig {
   // learninRate Policy
   required double learning_rate = 1 [default = 1.0];
 }

-message LinearLr {
+message LinearLrConfig {
   // learninRate Policy
   required double learning_rate = 1 [default = 1.0];
   optional double lr_decay_a = 2;

@@ -139,12 +139,12 @@ message OptimizerConfig {
   optional AdamConfig adam = 6;

   enum LrPolicy {
-    ConstLr = 0;
-    LinearLr = 1;
+    Const = 0;
+    Linear = 1;
   }
   required LrPolicy lr_policy = 11;
-  optional ConstLr const_lr = 12;
-  optional LinearLr linear_lr = 13;
+  optional ConstLrConfig const_lr = 12;
+  optional LinearLrConfig linear_lr = 13;

   // common config of optimizer
   // gradient clip when L2 exceeding value
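This is the rename the commit message refers to: the *Config suffix distinguishes the policy message types from the identically named LrPolicy enum values. Client code now selects a policy through the shortened enum and fills the matching *Config message, exactly as the updated test does. A minimal usage sketch against the generated C++ classes (MakeSgdConfig is a hypothetical helper name):

    #include <string>
    #include "OptimizerConfig.pb.h"  // generated from proto/OptimizerConfig.proto

    std::string MakeSgdConfig() {
      paddle::OptimizerConfig config;
      config.set_optimizer(paddle::OptimizerConfig::SGD);
      config.set_lr_policy(paddle::OptimizerConfig::Const);  // enum value (was ConstLr)
      config.mutable_const_lr()->set_learning_rate(0.1);     // ConstLrConfig message
      return config.SerializeAsString();
    }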