Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
39d0b3de
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
39d0b3de
编写于
6月 10, 2017
作者:
Q
qiaolongfei
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add test file mnist_test.py, free resource of newRemoteParameterUpdater properly
上级
c44f5dd8
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
141 additions
and
7 deletions
+141
-7
go/pserver/cclient/test/mnist_test.py
go/pserver/cclient/test/mnist_test.py
+134
-0
paddle/trainer/NewRemoteParameterUpdater.h
paddle/trainer/NewRemoteParameterUpdater.h
+7
-7
未找到文件。
go/pserver/cclient/test/mnist_test.py
0 → 100644
浏览文件 @
39d0b3de
import
paddle.v2
as
paddle
import
gzip
def softmax_regression(img):
    """Softmax regression: one fully-connected layer mapping the input
    image directly to the 10 digit-class probabilities."""
    return paddle.layer.fc(input=img,
                           size=10,
                           act=paddle.activation.Softmax())
def multilayer_perceptron(img):
    """Two-hidden-layer MLP: 128-ReLU -> 64-ReLU -> 10-Softmax."""
    # First fully-connected layer.
    h1 = paddle.layer.fc(input=img,
                         size=128,
                         act=paddle.activation.Relu())
    # Second fully-connected layer and its activation.
    h2 = paddle.layer.fc(input=h1,
                         size=64,
                         act=paddle.activation.Relu())
    # Output layer: size must be 10, the number of distinct digits.
    return paddle.layer.fc(input=h2,
                           size=10,
                           act=paddle.activation.Softmax())
def convolutional_neural_network(img):
    """LeNet-style CNN: two conv+pool stages, one tanh FC layer, and a
    10-way softmax output."""
    # First conv + pool stage (1 input channel, 20 filters).
    stage1 = paddle.networks.simple_img_conv_pool(input=img,
                                                  filter_size=5,
                                                  num_filters=20,
                                                  num_channel=1,
                                                  pool_size=2,
                                                  pool_stride=2,
                                                  act=paddle.activation.Tanh())
    # Second conv + pool stage (20 input channels, 50 filters).
    stage2 = paddle.networks.simple_img_conv_pool(input=stage1,
                                                  filter_size=5,
                                                  num_filters=50,
                                                  num_channel=20,
                                                  pool_size=2,
                                                  pool_stride=2,
                                                  act=paddle.activation.Tanh())
    # Fully-connected layer on top of the conv features.
    dense = paddle.layer.fc(input=stage2,
                            size=128,
                            act=paddle.activation.Tanh())
    # Softmax output: size must be 10, the number of distinct digits.
    return paddle.layer.fc(input=dense,
                           size=10,
                           act=paddle.activation.Softmax())
def main():
    """Train an MNIST classifier against a remote parameter server and
    run a small inference batch at the end.

    Side effects: connects to a pserver at localhost:3000, periodically
    writes model snapshots to ./params.tar.gz, and prints progress.
    Python 2 script (print statements).
    """
    # NOTE(review): trainer_id=1 looks off for a trainer_count=1 run
    # (trainer ids are usually 0-based) — confirm against the pserver setup.
    paddle.init(use_gpu=False, trainer_count=1, trainer_id=1)

    # define network topology: 28x28 grayscale image flattened to 784,
    # label is one of 10 digit classes.
    images = paddle.layer.data(
        name='pixel', type=paddle.data_type.dense_vector(784))
    label = paddle.layer.data(
        name='label', type=paddle.data_type.integer_value(10))

    # Here we can build the prediction network in different ways. Please
    # choose one by uncomment corresponding line.
    predict = softmax_regression(images)
    #predict = multilayer_perceptron(images)
    #predict = convolutional_neural_network(images)

    cost = paddle.layer.classification_cost(input=predict, label=label)

    parameters = paddle.parameters.create(cost)

    # Learning rate / L2 rate are scaled by the batch size (128).
    optimizer = paddle.optimizer.Momentum(
        learning_rate=0.1 / 128.0,
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=0.0005 * 128))

    # is_local=False + pserver_spec: updates go through the remote
    # parameter server rather than being applied locally.
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=optimizer,
                                 is_local=False,
                                 pserver_spec="localhost:3000")

    # (pass_id, test cost, classification error) per finished pass.
    lists = []

    def event_handler(event):
        # Called by the trainer after every batch and every pass.
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 1000 == 0:
                print "Pass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics)

                # Snapshot current parameters (overwritten each time).
                with gzip.open('params.tar.gz', 'w') as f:
                    parameters.to_tar(f)

        elif isinstance(event, paddle.event.EndPass):
            # Evaluate on the full MNIST test set at the end of each pass.
            result = trainer.test(reader=paddle.batch(
                paddle.dataset.mnist.test(), batch_size=128))
            print "Test with Pass %d, Cost %f, %s\n" % (
                event.pass_id, result.cost, result.metrics)
            lists.append((event.pass_id, result.cost,
                          result.metrics['classification_error_evaluator']))

    trainer.train(
        reader=paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=128),
        event_handler=event_handler,
        num_passes=100)

    # find the best pass (lowest test cost).
    # NOTE(review): the lambda parameter shadows the builtin `list`;
    # harmless here but worth renaming.
    best = sorted(lists, key=lambda list: float(list[1]))[0]
    print 'Best pass is %s, testing Avgcost is %s' % (best[0], best[1])
    print 'The classification accuracy is %.2f%%' % (100 - float(best[2]) * 100)

    # Collect the first 100 test images (features only, no labels).
    test_creator = paddle.dataset.mnist.test()
    test_data = []
    for item in test_creator():
        test_data.append((item[0], ))
        if len(test_data) == 100:
            break

    # output is a softmax layer. It returns probabilities.
    # Shape should be (100, 10)
    probs = paddle.infer(
        output_layer=predict, parameters=parameters, input=test_data)
    print probs.shape


if __name__ == '__main__':
    main()
paddle/trainer/NewRemoteParameterUpdater.h
浏览文件 @
39d0b3de
...
@@ -32,9 +32,11 @@ public:
...
@@ -32,9 +32,11 @@ public:
// Construct an updater from the optimization config and a parameter-server
// spec string (e.g. "localhost:3000").
NewRemoteParameterUpdater(const OptimizationConfig& config,
                          const std::string pserverSpec);

// Release the parameter/gradient arrays and the pserver client handle.
~NewRemoteParameterUpdater() {
  releaseNewParameter(newParameters_);
  releaseNewParameter(newGradients_);
  // parameterClient_ is a numeric handle; >= 0 means it was acquired.
  if (parameterClient_ >= 0) paddle_pserver_client_release(parameterClient_);
}
...
@@ -95,11 +97,9 @@ private:
...
@@ -95,11 +97,9 @@ private:
// Free an array of paddle_parameter pointers: release every non-null
// element through the pserver client API, then free the array itself.
// Safe to call with nullptr (no-op).
void releaseNewParameter(paddle_parameter** newParams) {
  if (newParams == nullptr) {
    return;
  }
  for (int i = 0; i < parameterSize(); ++i) {
    paddle_parameter* param = newParams[i];
    if (param != nullptr) {
      paddle_release_param(param);
    }
  }
  free(newParams);
}
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录