机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 1814fc29
Authored June 14, 2017 by dzhwinter
"fix lr_policy serialization"
Parent commit: b72e8aa3
Showing 12 changed files with 42 additions and 60 deletions (+42 -60)

paddle/optimizer/CMakeLists.txt           +0  -1
paddle/optimizer/adadelta_optimizer.cc    +5  -6
paddle/optimizer/adadelta_optimizer.h     +5  -9
paddle/optimizer/adagrad_optimizer.cc     +5  -4
paddle/optimizer/adagrad_optimizer.h      +4  -5
paddle/optimizer/adam_optimizer.cc        +5  -5
paddle/optimizer/adam_optimizer.h         +3  -5
paddle/optimizer/lr_policy.h              +6  -7
paddle/optimizer/parameter_optimizer.h    +4  -1
paddle/optimizer/serialization.h          +0  -12
paddle/optimizer/sgd_optimizer.cc         +5  -4
paddle/optimizer/sgd_optimizer.h          +0  -1
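Taken together, the twelve files make one coherent change: learning-rate-policy state is no longer folded into each optimizer's serialized state (the `set_learning_rate` / `lr_policy_->set` calls become `TODO(zhihong)` markers, and `LrPolicy` instead declares its own `SerializeState`/`DeserializeState`); `*state_len` is now taken from the actual `SerializeAsString()` output rather than from the deleted `CalStateSize` helpers; tensor members are allocated in constructor initializer lists instead of constructor bodies; and `~ParameterOptimizer` now also deletes its `lr_policy_`.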
paddle/optimizer/CMakeLists.txt

```diff
@@ -12,7 +12,6 @@ set(OPITMIZER_SRCS
 add_library(optimizer STATIC ${OPITMIZER_SRCS})
 add_dependencies(optimizer gen_proto_cpp)
 
-add_simple_unittest(tensor_test)
 add_simple_unittest(serialization_test)
 add_simple_unittest(parameter_optimizer_test)
 add_dependencies(parameter_optimizer_test optimizer)
```
paddle/optimizer/adadelta_optimizer.cc

```diff
@@ -27,23 +27,22 @@ void AdadeltaOptimizer::Update(const Tensor* gradient) {
 const char *AdadeltaOptimizer::SerializeState(int *state_len) {
   AdadeltaOptimizerState state;
-  state.set_learning_rate(lr_policy_->LearningRate(num_sample_passed_));
+  // TODO(zhihong) : add lr_policy serialization
   state.set_num_sample_passed(num_sample_passed_);
 
   TensorToProto(*parameter_, state.mutable_parameter());
   TensorToProto(*accum_gradient_, state.mutable_accum_gradient());
   TensorToProto(*accum_delta_, state.mutable_accum_delta());
   TensorToProto(*update_delta_, state.mutable_update_delta());
-  *state_len =
-      CalStateSize(parameter_, accum_gradient_, accum_delta_, update_delta_);
-  return state.SerializeAsString().c_str();
+  auto str = state.SerializeAsString();
+  *state_len = str.size();
+  return str.c_str();
 }
 
 void AdadeltaOptimizer::DeserializeState(const std::string &str) {
   AdadeltaOptimizerState state;
   state.ParseFromString(str);
-  lr_policy_->set(state.learning_rate());
+  // TODO(zhihong) : add lr_policy DeserializeState
   num_sample_passed_ = state.num_sample_passed();
   ProtoToTensor(state.parameter(), parameter_);
```
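The pattern replaced here, `return state.SerializeAsString().c_str();`, returned a pointer into a temporary `std::string` that dies at the end of the statement, and `*state_len` was computed from raw member sizes rather than from the bytes actually returned. The committed version reports the true serialized length, though `str` is still function-local, so the returned pointer still does not outlive the call. A minimal standalone sketch of the hazard and of one conventional way out (keeping the buffer alive in a member); `StateHolder` and `FakeSerialize` are hypothetical stand-ins, not Paddle code:

```cpp
#include <string>

// Stand-in for OptimizerState::SerializeAsString(): returns bytes by value.
static std::string FakeSerialize() { return std::string("\x08\x2a", 2); }

// Broken shape: the temporary string is destroyed at the end of the return
// statement, so the caller receives a dangling pointer.
const char *SerializeStateBroken(int *state_len) {
  *state_len = 2;                  // guessed separately from the actual bytes
  return FakeSerialize().c_str();  // dangling as soon as the function returns
}

// Safer shape: copy into a buffer owned by the object, so the pointer stays
// valid until the next SerializeState() call or the object's destruction.
class StateHolder {
 public:
  const char *SerializeState(int *state_len) {
    buf_ = FakeSerialize();                      // keep the bytes alive
    *state_len = static_cast<int>(buf_.size());  // length of those same bytes
    return buf_.data();
  }

 private:
  std::string buf_;
};
```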
paddle/optimizer/adadelta_optimizer.h

```diff
@@ -10,17 +10,13 @@ public:
   AdadeltaOptimizer(Tensor *parameter,
                     LrPolicy *lr,
                     double rho,
                     double epsilon,
                     double decay)
       : ParameterOptimizer(parameter, lr),
+        accum_gradient_(new Tensor(parameter->size())),
+        accum_delta_(new Tensor(parameter->size())),
+        update_delta_(new Tensor(parameter->size())),
         rho_(rho),
         epsilon_(epsilon),
-        decay_(decay) {
-    size_t size = parameter->size();
-    if (accum_gradient_) delete accum_gradient_;
-    accum_gradient_ = new Tensor(size);
-    if (accum_delta_) delete accum_delta_;
-    accum_delta_ = new Tensor(size);
-    if (update_delta_) delete update_delta_;
-    update_delta_ = new Tensor(size);
-  }
+        decay_(decay) {}
   ~AdadeltaOptimizer() {
     if (accum_gradient_) delete accum_gradient_;
     if (accum_delta_) delete accum_delta_;
```
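The deleted constructor body guarded and `delete`d `accum_gradient_`, `accum_delta_`, and `update_delta_` before assigning them, but those raw-pointer members were never initialized at that point, so even reading them in `if (accum_gradient_) ...` is undefined behavior. Moving the allocations into the member-initializer list, as the new version does, means the pointers are never observed in an indeterminate state. A minimal sketch of the two shapes, using a hypothetical `Buffer` type rather than Paddle's `Tensor`:

```cpp
#include <cstddef>

struct Buffer {
  explicit Buffer(size_t n) : n_(n) {}
  size_t n_;
};

class BrokenCtor {
 public:
  explicit BrokenCtor(size_t n) {
    if (buf_) delete buf_;  // UB: buf_ holds an indeterminate value here
    buf_ = new Buffer(n);
  }

 private:
  Buffer *buf_;  // not initialized before the constructor body runs
};

class FixedCtor {
 public:
  explicit FixedCtor(size_t n) : buf_(new Buffer(n)) {}  // defined from the start

 private:
  Buffer *buf_;
};
```

With `std::unique_ptr<Buffer>` members the guard-and-delete dance (and the matching hand-written destructors) would disappear entirely, but the initializer-list form above is the shape the commit actually adopts.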
paddle/optimizer/adagrad_optimizer.cc

```diff
@@ -19,19 +19,20 @@ void AdagradOptimizer::Update(const Tensor* gradient) {
 }
 const char *AdagradOptimizer::SerializeState(int *state_len) {
   AdagradOptimizerState state;
-  state.set_learning_rate(lr_policy_->LearningRate(num_sample_passed_));
+  // TODO(zhihong) : add lr_policy serialization
   state.set_num_sample_passed(num_sample_passed_);
   TensorToProto(*parameter_, state.mutable_parameter());
   TensorToProto(*accum_gradient_, state.mutable_accum_gradient());
-  *state_len = CalStateSize(parameter_, accum_gradient_);
-  return state.SerializeAsString().c_str();
+  auto str = state.SerializeAsString();
+  *state_len = str.size();
+  return str.c_str();
 }
 
 void AdagradOptimizer::DeserializeState(const std::string &str) {
   AdagradOptimizerState state;
   state.ParseFromString(str);
-  lr_policy_->set(state.learning_rate());
+  // TODO(zhihong) : add lr_policy DeserializeState
   num_sample_passed_ = state.num_sample_passed();
   ProtoToTensor(state.parameter(), parameter_);
   ProtoToTensor(state.accum_gradient(), accum_gradient_);
```
paddle/optimizer/adagrad_optimizer.h

```diff
@@ -11,11 +11,10 @@ public:
                    LrPolicy *lr,
                    double epsilon,
                    double decay)
-      : ParameterOptimizer(parameter, lr), epsilon_(epsilon), decay_(decay) {
-    size_t size = parameter->size();
-    if (accum_gradient_) delete accum_gradient_;
-    accum_gradient_ = new Tensor(size);
-  }
+      : ParameterOptimizer(parameter, lr),
+        accum_gradient_(new Tensor(parameter->size())),
+        epsilon_(epsilon),
+        decay_(decay) {}
   ~AdagradOptimizer() {
     if (accum_gradient_) delete accum_gradient_;
   }
```
paddle/optimizer/adam_optimizer.cc

```diff
@@ -24,20 +24,20 @@ void AdamOptimizer::Update(const Tensor *gradient) {
 const char *AdamOptimizer::SerializeState(int *state_len) {
   AdamOptimizerState state;
-  state.set_learning_rate(lr_policy_->LearningRate(num_sample_passed_));
+  // TODO(zhihong) : add lr_policy serialization
   state.set_num_sample_passed(num_sample_passed_);
   TensorToProto(*parameter_, state.mutable_parameter());
   TensorToProto(*velocitys_, state.mutable_momentums());
-  *state_len =
-      CalStateSize(parameter_, momentums_, velocitys_);
-  return state.SerializeAsString().c_str();
+  auto str = state.SerializeAsString();
+  *state_len = str.size();
+  return str.c_str();
 }
 
 void AdamOptimizer::DeserializeState(const std::string &str) {
   AdamOptimizerState state;
   state.ParseFromString(str);
-  lr_policy_->set(state.learning_rate());
+  // TODO(zhihong) : add lr_policy DeserializeState
   num_sample_passed_ = state.num_sample_passed();
   ProtoToTensor(state.parameter(), parameter_);
```
paddle/optimizer/adam_optimizer.h

```diff
@@ -14,14 +14,12 @@ public:
                 double epsilon,
                 double decay)
       : ParameterOptimizer(parameter, lr),
+        momentums_(new Tensor(parameter->size())),
+        velocitys_(new Tensor(parameter->size())),
         beta_1_(beta_1),
         beta_2_(beta_2),
         epsilon_(epsilon),
-        decay_(decay) {
-    size_t size = parameter->size();
-    momentums_ = new Tensor(size);
-    velocitys_ = new Tensor(size);
-  }
+        decay_(decay) {}
   ~AdamOptimizer() {
     if (momentums_) delete momentums_;
     if (velocitys_) delete velocitys_;
```
paddle/optimizer/lr_policy.h

```diff
@@ -10,7 +10,8 @@ class LrPolicy {
 public:
   virtual ~LrPolicy() {}
   virtual double LearningRate(const uint64_t num_sample_passed) = 0;
-  virtual void set(double current_learning_rate) = 0;
+  virtual const char *SerializeState(int *state_len) = 0;
+  virtual void DeserializeState(const std::string &state) = 0;
 };
 
 // constant learning rate policy
@@ -20,9 +21,8 @@ public:
   double LearningRate(const uint64_t num_sample_passed) {
     return learning_rate;
   }
-  void set(double current_learning_rate) {
-    learning_rate = current_learning_rate;
-  }
+  const char *SerializeState(int *state_len);
+  void DeserializeState(const std::string &state);
 
 private:
   double learning_rate;
@@ -35,9 +35,8 @@ public:
   double LearningRate(const uint64_t num_sample_passed) {
     return std::max(learning_rate - lr_decay_a * num_sample_passed, lr_decay_b);
   }
-  void set(double current_learning_rate) {
-    learning_rate = current_learning_rate;
-  }
+  const char *SerializeState(int *state_len);
+  void DeserializeState(const std::string &state);
 
 private:
   double learning_rate;
```
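The interface change replaces the mutable `set()` hook with per-policy `SerializeState`/`DeserializeState`, so each policy owns its own state round-trip (presumably what the optimizer-side `TODO(zhihong)` markers will eventually call). The commit only declares the methods for `ConstLr` and the linear policy; the body below is therefore a hypothetical sketch of what such an implementation could look like, not the actual Paddle code, and it packs raw bytes only to stay self-contained (the real code would go through a protobuf state message):

```cpp
#include <cstring>
#include <string>

// Hypothetical sketch: a constant-lr policy that serializes its one double.
class ConstLrSketch {
 public:
  explicit ConstLrSketch(double lr) : learning_rate_(lr) {}

  const char *SerializeState(int *state_len) {
    buf_.assign(reinterpret_cast<const char *>(&learning_rate_),
                sizeof(learning_rate_));
    *state_len = static_cast<int>(buf_.size());
    return buf_.data();  // owned by the object, so it outlives the call
  }

  void DeserializeState(const std::string &state) {
    if (state.size() == sizeof(learning_rate_)) {
      std::memcpy(&learning_rate_, state.data(), sizeof(learning_rate_));
    }
  }

 private:
  double learning_rate_;
  std::string buf_;  // keeps the serialized bytes alive for the caller
};
```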
paddle/optimizer/parameter_optimizer.h

```diff
@@ -19,7 +19,10 @@ public:
    */
   ParameterOptimizer(Tensor *parameter, LrPolicy *lr)
       : parameter_(parameter), lr_policy_(lr), num_sample_passed_(0) {}
-  virtual ~ParameterOptimizer() { delete parameter_; };
+  virtual ~ParameterOptimizer() {
+    delete parameter_;
+    delete lr_policy_;
+  }
 
   static ParameterOptimizer *Create(const std::string &config_proto,
                                     Tensor *parameter);
```
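Since the constructor stores the raw `LrPolicy *` it is handed, widening the destructor makes `ParameterOptimizer` the clear owner of both the parameter tensor and the policy, plugging the leak of whatever `Create()` allocated for the policy. The same ownership contract stated with smart pointers would look like the sketch below; this is a hedged restatement, not the commit's style, and `Tensor`/`LrPolicy` here are empty stand-ins for the real Paddle types:

```cpp
#include <cstdint>
#include <memory>

struct Tensor {};    // stand-in for paddle/optimizer/tensor.h
struct LrPolicy {};  // stand-in for paddle/optimizer/lr_policy.h

// Destroying the optimizer frees both the parameter tensor and the policy,
// with no hand-written destructor to forget.
class ParameterOptimizerSketch {
 public:
  ParameterOptimizerSketch(std::unique_ptr<Tensor> parameter,
                           std::unique_ptr<LrPolicy> lr)
      : parameter_(std::move(parameter)), lr_policy_(std::move(lr)) {}

 private:
  std::unique_ptr<Tensor> parameter_;
  std::unique_ptr<LrPolicy> lr_policy_;
  uint64_t num_sample_passed_ = 0;
};
```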
paddle/optimizer/serialization.h

```diff
@@ -10,18 +10,6 @@
 namespace paddle {
 namespace optimizer {
 
-static unsigned CalStateSize() { return 0; }
-
-template <typename HEAD, typename... TAIL>
-unsigned CalStateSize(const HEAD &head, const TAIL &... tail) {
-  return sizeof head + CalStateSize(tail...);
-}
-
-template <typename... TAIL>
-unsigned CalStateSize(const Tensor *head, const TAIL &... tail) {
-  return head->size() + CalStateSize(tail...);
-}
-
 static void TensorToProto(const Tensor &tensor, TensorProto *proto) {
   proto->set_data_type(TensorProto::PADDLE_ELEMENT_TYPE_FLOAT32);
   std::stringstream os;
```
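The deleted `CalStateSize` overloads summed in-memory sizes (`sizeof` for scalars, element counts for tensors), which bears no fixed relationship to the length of a protobuf wire encoding: varints, field tags, and length prefixes make the encoded size data-dependent. So the `*state_len` it produced could disagree with the buffer the optimizers actually returned. A small self-contained sketch of the mismatch, using a toy varint encoder instead of a real protobuf dependency:

```cpp
#include <cstdint>
#include <iostream>
#include <string>

// The removed helper, reduced to scalars: sums in-memory sizes.
static unsigned CalStateSize() { return 0; }
template <typename HEAD, typename... TAIL>
unsigned CalStateSize(const HEAD &head, const TAIL &... tail) {
  return sizeof head + CalStateSize(tail...);
}

// Toy varint encoder, the same idea protobuf uses for integer fields:
// small values take fewer bytes than sizeof(value).
static std::string EncodeVarint(uint64_t v) {
  std::string out;
  do {
    uint8_t byte = v & 0x7f;
    v >>= 7;
    if (v) byte |= 0x80;
    out.push_back(static_cast<char>(byte));
  } while (v);
  return out;
}

int main() {
  uint64_t num_sample_passed = 5;
  // In-memory size: 8 bytes.  Encoded size: 1 byte.
  std::cout << "CalStateSize: " << CalStateSize(num_sample_passed) << "\n";
  std::cout << "encoded size: " << EncodeVarint(num_sample_passed).size() << "\n";
  return 0;
}
```

Measuring `str.size()` on the actual `SerializeAsString()` output, as the .cc changes in this commit do, is the only length guaranteed to match the returned bytes.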
paddle/optimizer/sgd_optimizer.cc

```diff
@@ -29,19 +29,20 @@ void SGDOptimizer::Update(const Tensor *gradient) {
 const char *SGDOptimizer::SerializeState(int *state_len) {
   SGDOptimizerState state;
-  state.set_learning_rate(lr_policy_->LearningRate(num_sample_passed_));
+  // TODO(zhihong) : add lr_policy serialization
   state.set_num_sample_passed(num_sample_passed_);
   TensorToProto(*parameter_, state.mutable_parameter());
   TensorToProto(*momentums_, state.mutable_momentums());
-  *state_len = CalStateSize(parameter_, momentums_);
-  return state.SerializeAsString().c_str();
+  auto str = state.SerializeAsString();
+  *state_len = str.size();
+  return str.c_str();
 }
 
 void SGDOptimizer::DeserializeState(const std::string &str) {
   SGDOptimizerState state;
   state.ParseFromString(str);
-  lr_policy_->set(state.learning_rate());
+  // TODO(zhihong) : add lr_policy DeserializeState
   num_sample_passed_ = state.num_sample_passed();
   ProtoToTensor(state.parameter(), parameter_);
```
paddle/optimizer/sgd_optimizer.h

```diff
@@ -16,7 +16,6 @@ public:
     if (momentum_ != 0.0) {
       size_t size = parameter->size();
       // TODO: fix it with align aware allocator bind to Tensor
-      if (momentums_) delete momentums_;
       momentums_ = new Tensor(size);
     }
   }
```