BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 0e300f9b
Authored on Aug 23, 2017 by qiaolongfei

use init_net and random_op to initialize parameter

Parent: 37cd8165

Showing 1 changed file with 27 additions and 27 deletions:

python/paddle/v2/framework/tests/mnist.py (+27, -27)
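In short, the commit replaces host-side numpy initialization of each parameter tensor with a `uniform_random` operator that is appended to a dedicated `init_net` and run once before training. Below is a condensed sketch of that pattern, assembled only from calls that appear in the diff; the two import lines and the parameter name/shape (`"fc1.w"`, `[784, 100]`) are my assumptions based on the module path and the layer sizes used later, and actually running it requires the 2017-era `paddle.v2.framework` build this test targets.

```python
# Sketch of the initialization pattern this commit introduces.
# All framework calls below appear in the diff; the imports are assumed.
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator

scope = core.Scope()
place = core.CPUPlace()
dev_ctx = core.DeviceContext.create(place)

init_net = core.Net.create()

# Declare the parameter variable, then let a uniform_random op fill it.
scope.new_var("fc1.w")  # illustrative name, mirroring w_name = name + ".w"
op = Operator(
    "uniform_random", Out="fc1.w", dims=[784, 100], min=-0.5, max=0.5, seed=10)
op.infer_shape(scope)
init_net.append_op(op)

init_net.complete_add_op(True)
init_net.run(scope, dev_ctx)  # run once; the parameter is now initialized
```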
@@ -9,11 +9,8 @@ scope = core.Scope()
 place = core.CPUPlace()
 dev_ctx = core.DeviceContext.create(place)
 
-# init_net = core.Net.create()
-forward_network = core.Net.create()
-
-# should be init after forward_op is constructed
-# backward_net = core.Operator.backward(forward_net, set())
+init_net = core.Net.create()
+forward_net = core.Net.create()
 backward_net = None
 optimize_net = core.Net.create()
 
@@ -64,13 +61,12 @@ def sgd_optimizer(net, param_name, learning_rate=0.005):
 
 # should use operator and add these to the init_network
-def init_param(param_name, dims):
-    var = scope.new_var(param_name)
-    tensor = var.get_tensor()
-    tensor.set_dims(dims)
-    data = numpy.random.uniform(
-        low=-0.5, high=0.5, size=tensor.shape()).astype("float32")
-    tensor.set(data, place)
-
+def init_param(net, param_name, dims):
+    scope.new_var(param_name)
+    op = Operator(
+        "uniform_random", Out=param_name, dims=dims, min=-0.5, max=0.5,
+        seed=10)
+    op.infer_shape(scope)
+    net.append_op(op)
 
 
 # fc_layer
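For reference, the deleted host-side path is easy to reproduce with plain numpy, which also shows what the new `uniform_random` op is asked to compute inside the net instead: values drawn uniformly from [-0.5, 0.5) with the requested dims, as float32. A self-contained check (numpy only, no Paddle build needed); the `[784, 100]` shape is just the fc1 weight shape used later in this test:

```python
import numpy

# Mirror of the removed host-side initialization: draw the parameter
# values directly in Python with the same range the op is now given.
dims = [784, 100]
data = numpy.random.uniform(low=-0.5, high=0.5, size=dims).astype("float32")

assert data.shape == (784, 100)
assert data.dtype == numpy.float32
assert data.min() >= -0.5 and data.max() <= 0.5
print(data.mean())  # roughly 0, since the distribution is symmetric around 0
```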
@@ -96,7 +92,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
     input_dims = scope.find_var(input).get_tensor().get_dims()
 
     w_name = param or name + ".w"
-    init_param(param_name=w_name, dims=[input_dims[1], size])
+    init_param(net=init_net, param_name=w_name, dims=[input_dims[1], size])
     sgd_optimizer(net=optimize_net, param_name=w_name, learning_rate=0.01)
 
     pre_activation = name + ".mul.out"
@@ -107,7 +103,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
     # create bias variable if needed
     if bias:
         bias_name = name + ".b"
-        init_param(param_name=bias_name, dims=[size])
+        init_param(net=init_net, param_name=bias_name, dims=[size])
         sgd_optimizer(
             net=optimize_net, param_name=bias_name, learning_rate=0.001)
         bias_out = name + ".rowwise_add.out"
@@ -181,20 +177,22 @@ def error_rate(predict, label):
 
 images = data_layer(name='pixel', dims=[BATCH_SIZE, 784])
 labels = data_layer(name='label', dims=[BATCH_SIZE])
-fc1 = fc_layer(net=forward_network, input=images, size=100, act="sigmoid")
-fc2 = fc_layer(net=forward_network, input=fc1, size=100, act="sigmoid")
-predict = fc_layer(net=forward_network, input=fc2, size=100, act="softmax")
-cost = cross_entropy_layer(net=forward_network, input=predict, label=labels)
+fc1 = fc_layer(net=forward_net, input=images, size=100, act="sigmoid")
+fc2 = fc_layer(net=forward_net, input=fc1, size=100, act="sigmoid")
+predict = fc_layer(net=forward_net, input=fc2, size=100, act="softmax")
+cost = cross_entropy_layer(net=forward_net, input=predict, label=labels)
 
-forward_network.complete_add_op(True)
-backward_net = create_backward_net(forward_network)
+init_net.complete_add_op(True)
+forward_net.complete_add_op(True)
+backward_net = create_backward_net(forward_net)
 optimize_net.complete_add_op(True)
 
-print(forward_network)
+print(init_net)
+print(forward_net)
 print(backward_net)
 print(optimize_net)
 
-debug_print_op(forward_network)
+debug_print_op(forward_net)
 debug_print_op(backward_net)
 debug_print_op(optimize_net)
 
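The comment removed in the first hunk ("should be init after forward_op is constructed") names the ordering constraint this hunk now satisfies explicitly: the backward net is derived from the operators already added to the forward net, so `complete_add_op` must finalize `forward_net` before `create_backward_net` is called. A short sketch of the required order, restating only calls from this hunk with the reasoning as comments:

```python
# Finalize the nets that were built operator-by-operator.
init_net.complete_add_op(True)      # all initializer ops are registered
forward_net.complete_add_op(True)   # must precede deriving the backward net

# The backward net is generated from the finalized forward net, which is
# why it could not be created at the top of the file (see the comment
# removed in the first hunk).
backward_net = create_backward_net(forward_net)

optimize_net.complete_add_op(True)
```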
@@ -215,8 +213,8 @@ def test(cost_name):
         feed_data(images, image_data)
         feed_data(labels, label_data)
 
-        forward_network.infer_shape(scope)
-        forward_network.run(scope, dev_ctx)
+        forward_net.infer_shape(scope)
+        forward_net.run(scope, dev_ctx)
         cost.append(mean_cost(cost_name))
         error.append(error_rate(predict, "label"))
     print("cost=" + str(sum(cost) / float(len(cost))) + " error_rate=" + str(
@@ -224,6 +222,8 @@ def test(cost_name):
 
 
 PASS_NUM = 1
+
+init_net.run(scope, dev_ctx)
 for pass_id in range(PASS_NUM):
     batch_id = 0
 
@@ -233,8 +233,8 @@ for pass_id in range(PASS_NUM):
         feed_data(images, image_data)
         feed_data(labels, label_data)
 
-        forward_network.infer_shape(scope)
-        forward_network.run(scope, dev_ctx)
+        forward_net.infer_shape(scope)
+        forward_net.run(scope, dev_ctx)
         set_cost(cost)
         backward_net.infer_shape(scope)
         backward_net.run(scope, dev_ctx)
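Putting the hunks together, the run-time flow after this change is: run `init_net` once to materialize all parameters, then per batch run the forward and backward nets (the optimizer step from `optimize_net`, and the test pass that uses `forward_net` only, sit in parts of the file outside the hunks shown here). A condensed, hedged skeleton built from calls that appear in this diff; `train_batches` is a placeholder iterator, not part of the file:

```python
# One-time parameter initialization (new in this commit).
init_net.run(scope, dev_ctx)

PASS_NUM = 1
for pass_id in range(PASS_NUM):
    batch_id = 0
    for image_data, label_data in train_batches:  # placeholder data source
        feed_data(images, image_data)
        feed_data(labels, label_data)

        forward_net.infer_shape(scope)
        forward_net.run(scope, dev_ctx)
        set_cost(cost)

        backward_net.infer_shape(scope)
        backward_net.run(scope, dev_ctx)
        # optimize_net presumably runs here as well; that part of the file
        # is outside the hunks shown above.
        batch_id += 1
```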