Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 6935dd7b
Authored Jul 04, 2017 by dongzhihong
Parent: f448edf1

Commit message: "lr state serialization"
Showing 3 changed files with 47 additions and 30 deletions:

  paddle/optimizer/lr_policy.h        +33  -13
  paddle/optimizer/sgd_optimizer.cc    +2   -2
  proto/OptimizerConfig.proto         +12  -15
paddle/optimizer/lr_policy.h

```diff
@@ -19,34 +19,54 @@ class ConstLr final : public LrPolicy {
 public:
   ConstLr(double lr) : learning_rate(lr){};
   double LearningRate(const uint64_t num_sample_passed) {
-    return learning_rate;
+    return learning_rate_;
   }
-  const char *SerializeState(int *state_len) { return nullptr; }
-  void DeserializeState(const std::string &state) {}
+  const char *SerializeState(int *state_len) {
+    LrPolicyState state;
+    state.set_learning_rate(learning_rate_);
+    auto str = state.SerializeAsString();
+    *state_len = str.size();
+    return str.c_str();
+  }
+  void DeserializeState(const std::string &str) {
+    LrPolicyState state;
+    state.ParseFromString(str);
+    learning_rate_ = state.learning_rate();
+  }
 
 private:
-  double learning_rate;
+  double learning_rate_;
 };
 
 class LinearLr final : public LrPolicy {
 public:
   LinearLr(double lr, double lr_decay_a, double lr_decay_b)
-      : learning_rate(lr), lr_decay_a(lr_decay_a), lr_decay_b(lr_decay_b) {}
+      : learning_rate_(lr), lr_decay_a_(lr_decay_a), lr_decay_b_(lr_decay_b) {}
   double LearningRate(const uint64_t num_sample_passed) {
-    return std::max(learning_rate - lr_decay_a * num_sample_passed, lr_decay_b);
+    return std::max(learning_rate_ - lr_decay_a_ * num_sample_passed,
+                    lr_decay_b_);
   }
   const char *SerializeState(int *state_len) {
-    // TODO(zhihong) : add lr_policy serialization
-    return nullptr;
+    LrPolicyState state;
+    state.set_learning_rate(learning_rate_);
+    state.set_lr_decay_a(lr_decay_a_);
+    state.set_lr_decay_b(lr_decay_b_);
+    auto str = state.SerializeAsString();
+    *state_len = str.size();
+    return str.c_str();
   }
-  void DeserializeState(const std::string &state) {
-    // TODO(zhihong) : add lr_policy serialization
+  void DeserializeState(const std::string &str) {
+    LrPolicyState state;
+    state.ParseFromString(str);
+    learning_rate_ = state.learning_rate();
+    lr_decay_a_ = state.lr_decay_a();
+    lr_decay_b_ = state.lr_decay_b();
   }
 
 private:
-  double learning_rate;
-  double lr_decay_a;
-  double lr_decay_b;
+  double learning_rate_;
+  double lr_decay_a_;
+  double lr_decay_b_;
 };
 }  // namespace optimizer
```
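Both policies now persist their scalars through the new LrPolicyState protobuf message introduced in proto/OptimizerConfig.proto below. A minimal round-trip sketch of that pattern, assuming protoc generates OptimizerConfig.pb.h from the repo's proto and that the messages sit in the global namespace (both are assumptions; adjust for the proto's actual package, if any):

```cpp
// Round-trip sketch for LrPolicyState, mirroring LinearLr's new
// SerializeState/DeserializeState. Header name and namespace are
// assumptions, not taken from the commit.
#include <cassert>
#include <string>

#include "OptimizerConfig.pb.h"

int main() {
  LrPolicyState state;
  state.set_learning_rate(0.1);
  state.set_lr_decay_a(1e-6);
  state.set_lr_decay_b(1e-3);

  // Keep the serialized std::string alive in the caller. Note that the
  // new SerializeState methods return str.c_str() of a function-local
  // string, so their return value dangles once the function exits.
  std::string buf = state.SerializeAsString();

  LrPolicyState restored;
  bool ok = restored.ParseFromString(buf);
  assert(ok);
  assert(restored.learning_rate() == 0.1);  // doubles round-trip exactly
  assert(restored.lr_decay_a() == 1e-6);
  return 0;
}
```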
paddle/optimizer/sgd_optimizer.cc

```diff
@@ -30,10 +30,10 @@ void SGDOptimizer::Update(const Tensor *gradient) {
 const char *SGDOptimizer::SerializeState(int *state_len) {
   SGDOptimizerState state;
   state.set_num_sample_passed(num_sample_passed_);
   TensorToProto(*parameter_, state.mutable_parameter());
-  state.set_lr_
   if (momentum_ != 0.0) TensorToProto(*momentums_, state.mutable_momentums());
   auto str = state.SerializeAsString();
-  *state_len = str.size();
+  *state_len += str.size();
   return str.c_str();
 }
```

(The removed `state.set_lr_` line is truncated in the page capture; the rest of that statement was not recoverable.)
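The `=` to `+=` change turns `*state_len` from an assigned out-parameter into a running total, presumably so the learning-rate policy's serialized length and the optimizer's own can be accumulated into one counter; it also means callers must zero-initialize the counter. A toy sketch of that accumulate-length pattern (PartA and PartB are hypothetical stand-ins, not Paddle types):

```cpp
// Toy illustration of the accumulate-length pattern behind
// `*state_len += str.size();`: several serializers add their share
// to a single running total owned by the caller.
#include <cassert>
#include <string>

struct PartA {
  std::string Serialize(int *state_len) {
    std::string s = "aaaa";  // stand-in for state.SerializeAsString()
    *state_len += static_cast<int>(s.size());  // add, don't overwrite
    return s;
  }
};

struct PartB {
  std::string Serialize(int *state_len) {
    std::string s = "bb";
    *state_len += static_cast<int>(s.size());
    return s;
  }
};

int main() {
  int total = 0;  // callers must start at zero: callees only ever add
  PartA a;
  PartB b;
  std::string blob = a.Serialize(&total);
  blob += b.Serialize(&total);
  assert(total == 6);
  assert(blob == "aaaabb");
  return 0;
}
```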
proto/OptimizerConfig.proto

```diff
@@ -78,11 +78,15 @@ enum DataType {
   repeated bytes content = 2;
 }
 
+message LrPolicyState {
+  // learninRate Policy
+  optional double learning_rate = 1 [default = 1.0];
+  optional double lr_decay_a = 2;
+  optional double lr_decay_b = 3;
+}
+
 message SGDOptimizerState {
-  // learning rate policy
-  optional double learning_rate = 101;
-  optional double lr_decay_a = 102;
-  optional double lr_decay_b = 103;
+  optional LrPolicyState lrstate = 101;
   optional double num_sample_passed = 104;
   // state
   optional TensorProto parameter = 1;
@@ -91,9 +95,7 @@ message SGDOptimizerState {
 message AdadeltaOptimizerState {
-  // learning rate policy
-  optional double learning_rate = 101;
-  optional double lr_decay_a = 102;
-  optional double lr_decay_b = 103;
+  optional LrPolicyState lrstate = 101;
   optional double num_sample_passed = 104;
   // state
   optional TensorProto parameter = 1;
@@ -102,11 +104,9 @@ message AdadeltaOptimizerState {
   optional TensorProto update_delta = 4;
 }
 
 message AdagradOptimizerState {
-  // learning rate policy
-  optional double learning_rate = 101;
-  optional double lr_decay_a = 102;
-  optional double lr_decay_b = 103;
+  optional LrPolicyState lrstate = 101;
   optional double num_sample_passed = 104;
   // state
   optional TensorProto parameter = 1;
@@ -114,10 +114,7 @@ message AdagradOptimizerState {
 }
 
 message AdamOptimizerState {
-  // learning rate policy
-  optional double learning_rate = 101;
-  optional double lr_decay_a = 102;
-  optional double lr_decay_b = 103;
+  optional LrPolicyState lrstate = 101;
   optional double num_sample_passed = 104;
   // state
   optional TensorProto parameter = 1;
```
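Each optimizer state now embeds the learning-rate policy as one nested submessage instead of three flat doubles. A sketch of the generated proto2 API for the nested field (same header and namespace assumptions as above). One caveat worth noting: `lrstate = 101` reuses the tag number that `learning_rate` previously held, and the wire type changes from fixed64 to length-delimited, so optimizer states serialized before this commit are not wire-compatible with the new schema.

```cpp
// Sketch: populating and reading the nested lrstate field on
// SGDOptimizerState. Header name and namespace are assumptions.
#include <cassert>
#include <string>

#include "OptimizerConfig.pb.h"

int main() {
  SGDOptimizerState state;
  state.set_num_sample_passed(1024);
  // mutable_lrstate() lazily creates the optional submessage.
  LrPolicyState *lr = state.mutable_lrstate();
  lr->set_learning_rate(0.1);
  lr->set_lr_decay_a(1e-6);
  lr->set_lr_decay_b(1e-3);

  std::string buf = state.SerializeAsString();

  SGDOptimizerState restored;
  bool ok = restored.ParseFromString(buf);
  assert(ok);
  assert(restored.has_lrstate());  // presence bit for the submessage
  assert(restored.lrstate().learning_rate() == 0.1);
  return 0;
}
```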