BaiXuePrincess / Paddle (fork of PaddlePaddle / Paddle, in sync with upstream)
Commit b54990e9
Authored Oct 24, 2017 by helinwang; committed via GitHub on Oct 24, 2017
Merge pull request #5053 from helinwang/serialization
Fix parameter server checkpoint serialization crash
Parents: dd0008d5, f28b4d68
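Why this crashed: every SerializeState implementation returned str.c_str() from a function-local std::string, so the returned pointer dangled the moment the function returned, and the pserver read freed memory while writing a checkpoint. The old code also mixed up lengths: the learning-rate policy wrote *state_len = str.size() and the optimizer then did *state_len += str.size(), so the reported length matched neither buffer. The fix makes SerializeState() return std::string by value and copies the bytes into a malloc'd buffer at the C boundary. A minimal sketch of the bug class follows; it is illustrative, not the exact Paddle code:

#include <string>

// Before (broken): `str` is destroyed when the function returns, so the
// caller is left holding a dangling pointer -- reading it is undefined
// behavior.
const char *SerializeStateBroken(int *state_len) {
  std::string str = "serialized-proto-bytes";  // stand-in for state.SerializeAsString()
  *state_len = str.size();
  return str.c_str();  // dangles immediately
}

// After (fixed): return the bytes by value; ownership is unambiguous and
// the C wrapper can copy them into a heap buffer the caller frees.
std::string SerializeStateFixed() {
  return std::string("serialized-proto-bytes");
}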
Showing 17 changed files with 129 additions and 42 deletions (+129 −42).
go/pserver/optimizer.go                          +16  −3
go/pserver/optimizer_test.go                     +40  −0
go/pserver/service.go                             +7  −0
paddle/optimizer/adadelta_optimizer.cc            +3  −5
paddle/optimizer/adadelta_optimizer.h             +1  −1
paddle/optimizer/adagrad_optimizer.cc             +3  −5
paddle/optimizer/adagrad_optimizer.h              +1  −1
paddle/optimizer/adam_optimizer.cc                +3  −5
paddle/optimizer/adam_optimizer.h                 +1  −1
paddle/optimizer/lr_policy.h                      +5  −9
paddle/optimizer/optimizer.cc                    +11  −2
paddle/optimizer/parameter_optimizer.cc           +4  −0
paddle/optimizer/parameter_optimizer.h            +1  −1
paddle/optimizer/parameter_optimizer_test.cpp    +13  −2
paddle/optimizer/serialization_test.cpp          +16  −1
paddle/optimizer/sgd_optimizer.cc                 +3  −5
paddle/optimizer/sgd_optimizer.h                  +1  −1
go/pserver/optimizer.go (+16 −3)

@@ -72,21 +72,34 @@ func newOptimizer(paramWithConfigs ParameterWithConfig, State []byte) *optimizer
 	}
 	o.config = c
-	o.opt = C.paddle_create_optimizer((*C.uchar)(&c[0]), C.int(len(c)), C.paddle_element_type(p.ElementType), cbuffer, C.int(paramBufferSize), (*C.char)(cstate), C.int(len(s)))
+	o.opt = C.paddle_create_optimizer(
+		(*C.uchar)(&c[0]),
+		C.int(len(c)),
+		C.paddle_element_type(p.ElementType),
+		cbuffer,
+		C.int(paramBufferSize),
+		(*C.char)(cstate),
+		C.int(len(s)),
+	)
 	return o
 }

 func (o *optimizer) GetWeights() []byte {
 	var buffer unsafe.Pointer
 	// we do not own the buffer, no need to free later.
 	bufferLen := C.paddle_optimizer_get_weights(o.opt, &buffer)
 	return cArrayToSlice(buffer, int(bufferLen)*C.sizeof_float)
 }

 func (o *optimizer) GetStates() []byte {
 	var cbuffer *C.char
 	// we owns the state buffer, need to free later.
 	cbufferLen := C.paddle_optimizer_get_state(o.opt, &cbuffer)
-	return cArrayToSlice(unsafe.Pointer(cbuffer), int(cbufferLen))
+	buf := cArrayToSlice(unsafe.Pointer(cbuffer), int(cbufferLen))
+	cpy := make([]byte, len(buf))
+	copy(cpy, buf)
+	C.free(unsafe.Pointer(cbuffer))
+	return cpy
 }

 func (o *optimizer) UpdateParameter(g Gradient) error {
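The heart of the Go-side change is the ownership contract at the cgo boundary: paddle_optimizer_get_state now returns a malloc'd buffer, and the slice produced by cArrayToSlice only aliases that C memory, so GetStates must copy into Go-managed memory before freeing. Below is a self-contained sketch of the same copy-then-free pattern; the toy make_state function and getState wrapper are hypothetical stand-ins, not pserver code:

package main

/*
#include <stdlib.h>
#include <string.h>

// toy stand-in for paddle_optimizer_get_state: hands back a malloc'd
// buffer whose ownership passes to the caller
static char *make_state(int *len) {
	const char src[] = "state-bytes";
	*len = (int)(sizeof(src) - 1);
	char *p = (char *)malloc((size_t)*len);
	memcpy(p, src, (size_t)*len);
	return p;
}
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func getState() []byte {
	var n C.int
	p := C.make_state(&n)
	// copy into Go-managed memory, then free the C buffer, so the
	// returned slice never aliases freed memory
	cpy := C.GoBytes(unsafe.Pointer(p), n)
	C.free(unsafe.Pointer(p))
	return cpy
}

func main() {
	fmt.Printf("%s\n", getState())
}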
go/pserver/optimizer_test.go (+40 −0)

@@ -15,8 +15,12 @@
 package pserver

 import (
+	"encoding/binary"
+	"io/ioutil"
+	"math"
 	"testing"
+	"github.com/stretchr/testify/assert"
 )

 func TestOptimizerCreateRelease(t *testing.T) {
@@ -36,3 +40,39 @@ func TestOptimizerCreateRelease(t *testing.T) {
 	o := newOptimizer(param, nil)
 	o.Cleanup()
 }
+
+func float32Bytes(float float32) []byte {
+	bits := math.Float32bits(float)
+	bytes := make([]byte, 4)
+	binary.LittleEndian.PutUint32(bytes, bits)
+	return bytes
+}
+
+func TestOptimizerState(t *testing.T) {
+	p := Parameter{
+		Name:        "a",
+		ElementType: Int32,
+	}
+	weights := float32Bytes(100)
+	p.Content = weights
+	config, err := ioutil.ReadFile("./client/c/test/testdata/optimizer.pb")
+	if err != nil {
+		t.Fatalf("read optimizer proto failed")
+	}
+	param := ParameterWithConfig{
+		Param:  p,
+		Config: config,
+	}
+	o := newOptimizer(param, nil)
+	s := o.GetStates()
+
+	// clear param content and check if the state is restored.
+	param.Param.Content = float32Bytes(300)
+	o1 := newOptimizer(param, s)
+	s1 := o1.GetStates()
+	assert.Equal(t, s, s1)
+	assert.Equal(t, weights, o.GetWeights())
+	assert.Equal(t, weights, o1.GetWeights())
+	o.Cleanup()
+	o1.Cleanup()
+}
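The new test encodes parameter content as raw little-endian float32 bytes. For reference, the helper inverts cleanly with the same stdlib calls; a hypothetical round-trip check:

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// same encoding as the test's float32Bytes helper
func float32Bytes(f float32) []byte {
	b := make([]byte, 4)
	binary.LittleEndian.PutUint32(b, math.Float32bits(f))
	return b
}

func main() {
	b := float32Bytes(100)
	// decode: bytes -> uint32 bit pattern -> float32
	f := math.Float32frombits(binary.LittleEndian.Uint32(b))
	fmt.Println(f) // 100
}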
go/pserver/service.go (+7 −0)

@@ -297,6 +297,13 @@ func (s *Service) checkpoint() (err error) {
 		return
 	}

+	if _, err = os.Stat(s.checkpointPath); os.IsNotExist(err) {
+		err = os.MkdirAll(s.checkpointPath, os.ModePerm)
+		if err != nil {
+			return
+		}
+	}
+
 	id := uuid.NewV4().String()
 	p := path.Join(s.checkpointPath, id)
 	f, err := os.Create(p)
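One note on the new guard: os.MkdirAll already returns nil when the path exists as a directory, so the os.Stat / os.IsNotExist check is defensive rather than strictly required; what matters is that the checkpoint directory now exists before os.Create runs. A hypothetical minimal equivalent:

package main

import "os"

// ensureDir mirrors the checkpoint-path guard above (hypothetical helper,
// not pserver code); MkdirAll is a no-op with a nil error if the
// directory already exists.
func ensureDir(path string) error {
	return os.MkdirAll(path, os.ModePerm)
}

func main() {
	if err := ensureDir("/tmp/pserver-checkpoints"); err != nil {
		panic(err)
	}
}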
paddle/optimizer/adadelta_optimizer.cc (+3 −5)

@@ -25,19 +25,17 @@ void AdadeltaOptimizer::Update(const Tensor* gradient) {
   }
 }

-const char *AdadeltaOptimizer::SerializeState(int *state_len) {
+std::string AdadeltaOptimizer::SerializeState() {
   AdadeltaOptimizerState state;
   state.set_num_sample_passed(num_sample_passed_);
-  std::string lr_str = this->lr_policy_->SerializeState(state_len);
+  std::string lr_str = this->lr_policy_->SerializeState();
   state.mutable_lr_state()->ParseFromString(lr_str);
   TensorToProto(*parameter_, state.mutable_parameter());
   TensorToProto(*accum_gradient_, state.mutable_accum_gradient());
   TensorToProto(*accum_delta_, state.mutable_accum_delta());
   TensorToProto(*update_delta_, state.mutable_update_delta());
-  auto str = state.SerializeAsString();
-  *state_len += str.size();
-  return str.c_str();
+  return state.SerializeAsString();
 }

 void AdadeltaOptimizer::DeserializeState(const std::string &str) {
paddle/optimizer/adadelta_optimizer.h (+1 −1)

@@ -23,7 +23,7 @@ public:
     if (update_delta_) delete update_delta_;
   }
   void Update(const Tensor *gradient);
-  const char *SerializeState(int *state_len);
+  std::string SerializeState();
   void DeserializeState(const std::string &state);

 private:
paddle/optimizer/adagrad_optimizer.cc (+3 −5)

@@ -17,17 +17,15 @@ void AdagradOptimizer::Update(const Tensor* gradient) {
         learning_rate * decay_ * param[i];
   }
 }

-const char *AdagradOptimizer::SerializeState(int *state_len) {
+std::string AdagradOptimizer::SerializeState() {
   AdagradOptimizerState state;
   state.set_num_sample_passed(num_sample_passed_);
-  std::string lr_str = this->lr_policy_->SerializeState(state_len);
+  std::string lr_str = this->lr_policy_->SerializeState();
   state.mutable_lr_state()->ParseFromString(lr_str);
   TensorToProto(*parameter_, state.mutable_parameter());
   TensorToProto(*accum_gradient_, state.mutable_accum_gradient());
-  auto str = state.SerializeAsString();
-  *state_len += str.size();
-  return str.c_str();
+  return state.SerializeAsString();
 }

 void AdagradOptimizer::DeserializeState(const std::string &str) {
paddle/optimizer/adagrad_optimizer.h (+1 −1)

@@ -19,7 +19,7 @@ public:
     if (accum_gradient_) delete accum_gradient_;
   }
   void Update(const Tensor *gradient);
-  const char *SerializeState(int *state_len);
+  std::string SerializeState();
   void DeserializeState(const std::string &state);

 private:
paddle/optimizer/adam_optimizer.cc (+3 −5)

@@ -22,18 +22,16 @@ void AdamOptimizer::Update(const Tensor *gradient) {
   }
 }

-const char *AdamOptimizer::SerializeState(int *state_len) {
+std::string AdamOptimizer::SerializeState() {
   AdamOptimizerState state;
-  std::string lr_str = this->lr_policy_->SerializeState(state_len);
+  std::string lr_str = this->lr_policy_->SerializeState();
   state.mutable_lr_state()->ParseFromString(lr_str);
   state.set_num_sample_passed(num_sample_passed_);
   TensorToProto(*parameter_, state.mutable_parameter());
   TensorToProto(*momentums_, state.mutable_momentums());
   TensorToProto(*velocitys_, state.mutable_velocitys());
-  auto str = state.SerializeAsString();
-  *state_len += str.size();
-  return str.c_str();
+  return state.SerializeAsString();
 }

 void AdamOptimizer::DeserializeState(const std::string &str) {
paddle/optimizer/adam_optimizer.h (+1 −1)

@@ -25,7 +25,7 @@ public:
     if (velocitys_) delete velocitys_;
   }
   void Update(const Tensor *gradient);
-  const char *SerializeState(int *state_len);
+  std::string SerializeState();
   void DeserializeState(const std::string &state);

 private:
paddle/optimizer/lr_policy.h (+5 −9)

@@ -10,7 +10,7 @@ class LrPolicy {
 public:
   virtual ~LrPolicy() {}
   virtual double LearningRate(const uint64_t num_sample_passed) = 0;
-  virtual const char *SerializeState(int *state_len) = 0;
+  virtual std::string SerializeState() = 0;
   virtual void DeserializeState(const std::string &state) = 0;
 };

@@ -21,12 +21,10 @@ public:
   double LearningRate(const uint64_t num_sample_passed) {
     return learning_rate_;
   }
-  const char *SerializeState(int *state_len) {
+  std::string SerializeState() {
     LrPolicyState state;
     state.set_learning_rate(learning_rate_);
-    auto str = state.SerializeAsString();
-    *state_len = str.size();
-    return str.c_str();
+    return state.SerializeAsString();
   }
   void DeserializeState(const std::string &str) {
     LrPolicyState state;
@@ -46,14 +44,12 @@ public:
     return std::max(learning_rate_ - lr_decay_a_ * num_sample_passed,
                     lr_decay_b_);
   }
-  const char *SerializeState(int *state_len) {
+  std::string SerializeState() {
     LrPolicyState state;
     state.set_learning_rate(learning_rate_);
     state.set_lr_decay_a(lr_decay_a_);
     state.set_lr_decay_b(lr_decay_b_);
-    auto str = state.SerializeAsString();
-    *state_len = str.size();
-    return str.c_str();
+    return state.SerializeAsString();
   }
   void DeserializeState(const std::string &str) {
     LrPolicyState state;
paddle/optimizer/optimizer.cc (+11 −2)

 #include "optimizer.h"
+#include <glog/logging.h>
+#include <cstdlib>
+#include <cstring>
 #include <string>

 #include "parameter_optimizer.h"

@@ -78,7 +81,13 @@ int paddle_optimizer_get_weights(paddle_optimizer* o, void** param_buffer) {
 }

 int paddle_optimizer_get_state(paddle_optimizer* o, const char** state) {
-  int state_len = 0;
-  *state = o->impl->SerializeState(&state_len);
+  std::string s = o->impl->SerializeState();
+  int state_len = s.size();
+  if (state_len > 0) {
+    *state = (char*)std::malloc(state_len);
+    std::memcpy((void*)*state, (const void*)s.c_str(), state_len);
+  }
+
   return state_len;
 }
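The rewritten wrapper makes the C API's contract explicit: *state points at a freshly malloc'd copy owned by the caller, which is why the Go client frees it with C.free in GetStates. A hypothetical C++ caller honoring that contract:

#include <cstdio>
#include <cstdlib>
#include "optimizer.h"  // for paddle_optimizer and paddle_optimizer_get_state

// Hypothetical consumer, not Paddle code: serialize the optimizer state
// and append it to an open checkpoint file.
void write_checkpoint(paddle_optimizer* o, std::FILE* f) {
  const char* state = nullptr;
  int len = paddle_optimizer_get_state(o, &state);
  if (len > 0) {
    std::fwrite(state, 1, static_cast<std::size_t>(len), f);
    std::free((void*)state);  // free the caller-owned buffer
  }
}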
paddle/optimizer/parameter_optimizer.cc (+4 −0)

@@ -32,6 +32,7 @@ ParameterOptimizer *ParameterOptimizer::Create(const std::string &config_proto,
                        Tensor *parameter,
                        const OptimizerConfig &config) -> ParameterOptimizer * {
     if (config.optimizer() == OptimizerConfig::SGD) {
+      LOG(INFO) << "creating SGD optimizer";
       return new SGDOptimizer(parameter,
                               lr,
                               config.sgd().momentum(),
@@ -39,6 +40,7 @@ ParameterOptimizer *ParameterOptimizer::Create(const std::string &config_proto,
                               config.sgd().nesterov());
     }
     if (config.optimizer() == OptimizerConfig::Adadelta) {
+      LOG(INFO) << "creating Adadelta optimizer";
       return new AdadeltaOptimizer(parameter,
                                    lr,
                                    config.adadelta().rho(),
@@ -46,10 +48,12 @@ ParameterOptimizer *ParameterOptimizer::Create(const std::string &config_proto,
                                    config.adadelta().decay());
     }
     if (config.optimizer() == OptimizerConfig::Adagrad) {
+      LOG(INFO) << "creating Adagrad optimizer";
       return new AdagradOptimizer(
           parameter, lr, config.adagrad().epsilon(), config.adagrad().decay());
     }
     if (config.optimizer() == OptimizerConfig::Adam) {
+      LOG(INFO) << "creating Adam optimizer";
       return new AdamOptimizer(parameter,
                                lr,
                                config.adam().beta_1(),
paddle/optimizer/parameter_optimizer.h (+1 −1)

@@ -28,7 +28,7 @@ public:
                                     Tensor *parameter);
   virtual void Update(const Tensor *gradient) = 0;
   virtual float *get_weight(int *param_size) const;
-  virtual const char *SerializeState(int *state_len) = 0;
+  virtual std::string SerializeState() = 0;
   virtual void DeserializeState(const std::string &state) = 0;

 protected:
paddle/optimizer/parameter_optimizer_test.cpp (+13 −2)

@@ -85,6 +85,7 @@ public:
     for (size_t i = 0; i < opts_.size(); ++i) {
       int s = 0;
       float *newp = (float *)opts_[i]->get_weight(&s);
+      EXPECT_EQ(s, kSize);
       for (size_t j = 0; j < kSize; ++j) {
         EXPECT_EQ(newp[j], (*p)[j]);
       }
@@ -99,10 +100,20 @@ public:
   }

   void TestCheckPoint() {
+    paddle::optimizer::Tensor *p = FixedTensor(kSize);
     for (size_t i = 0; i < opts_.size(); ++i) {
-      int state_len = 0;
-      std::string state = opts_[i]->SerializeState(&state_len);
+      auto state = opts_[i]->SerializeState();
+      opts_[i]->DeserializeState(state);
+      auto state1 = opts_[i]->SerializeState();
       opts_[i]->DeserializeState(state);
+      EXPECT_EQ(state, state1);
+
+      int s = 0;
+      float *newp = (float *)opts_[i]->get_weight(&s);
+      EXPECT_EQ(s, kSize);
+      for (size_t j = 0; j < kSize; ++j) {
+        EXPECT_EQ(newp[j], (*p)[j]);
+      }
     }
   }
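The strengthened TestCheckPoint asserts a fixed-point property: serializing, deserializing, and serializing again must yield byte-identical state (EXPECT_EQ(state, state1)), and the restored weights must still equal the fixed tensor. Under the old c_str()-returning interface this comparison would have read freed memory; with std::string values the check is well-defined and would catch any lossy round trip.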
paddle/optimizer/serialization_test.cpp (+16 −1)

@@ -21,7 +21,22 @@ TEST(TensorToProto, Case1) {
   paddle::optimizer::Tensor t(3), t1(3);
   for (size_t i = 0; i < t.size(); ++i) {
     t[i] = i;
-    t1[i] = 0;
+    t1[i] = 10;
   }

   paddle::TensorProto proto;
   paddle::optimizer::TensorToProto(t, &proto);
   paddle::optimizer::ProtoToTensor(proto, &t1);
   for (size_t i = 0; i < t1.size(); ++i) {
     EXPECT_EQ(t1[i], t[i]);
   }
 }

+TEST(TensorToProto, Case2) {
+  paddle::optimizer::Tensor t(1), t1(1);
+  for (size_t i = 0; i < t.size(); ++i) {
+    t[i] = i;
+    t1[i] = 10;
+  }
+
+  paddle::TensorProto proto;
paddle/optimizer/sgd_optimizer.cc (+3 −5)

@@ -27,16 +27,14 @@ void SGDOptimizer::Update(const Tensor *gradient) {
   }
 }

-const char *SGDOptimizer::SerializeState(int *state_len) {
+std::string SGDOptimizer::SerializeState() {
   SGDOptimizerState state;
   state.set_num_sample_passed(num_sample_passed_);
-  std::string lr_str = this->lr_policy_->SerializeState(state_len);
+  std::string lr_str = this->lr_policy_->SerializeState();
   state.mutable_lr_state()->ParseFromString(lr_str);
   TensorToProto(*parameter_, state.mutable_parameter());
   if (momentum_ != 0.0) TensorToProto(*momentums_, state.mutable_momentums());
-  auto str = state.SerializeAsString();
-  *state_len += str.size();
-  return str.c_str();
+  return state.SerializeAsString();
 }

 void SGDOptimizer::DeserializeState(const std::string &str) {
paddle/optimizer/sgd_optimizer.h (+1 −1)

@@ -23,7 +23,7 @@ public:
     if (momentums_) delete momentums_;
   }
   void Update(const Tensor *gradient);
-  const char *SerializeState(int *state_len);
+  std::string SerializeState();
   void DeserializeState(const std::string &state);

 private: