Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
暴走的小小菜鸟
Paddle
提交
118dd149
P
Paddle
项目概览
暴走的小小菜鸟
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
118dd149
编写于
8月 21, 2017
作者:
Q
qiaolongfei
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
can run, for debug
上级
f6dd7876
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
66 additions
and
8 deletions
+66
-8
python/paddle/v2/framework/tests/CMakeLists.txt
python/paddle/v2/framework/tests/CMakeLists.txt
+1
-0
python/paddle/v2/framework/tests/mnist.py
python/paddle/v2/framework/tests/mnist.py
+65
-8
未找到文件。
python/paddle/v2/framework/tests/CMakeLists.txt
浏览文件 @
118dd149
...
...
@@ -27,3 +27,4 @@ py_test(test_uniform_random_op SRCS test_uniform_random_op.py)
py_test(test_recurrent_op SRCS test_recurrent_op.py)
py_test(test_sgd_op SRCS test_sgd_op.py)
py_test(test_gradient_checker SRCS test_gradient_checker.py)
# Register the MNIST debug script as a runnable python test target.
py_test(mnist SRCS mnist.py)
python/paddle/v2/framework/tests/mnist.py
浏览文件 @
118dd149
...
...
@@ -2,7 +2,7 @@ import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator

import numpy

# Tiny batch so a debug pass is quick to run and inspect by hand.
BATCH_SIZE = 2

# Module-level scope and device shared by every helper below.
scope = core.Scope()
place = core.CPUPlace()
...
...
@@ -35,10 +35,15 @@ def data_layer(name, dims):
def feed_data(name, data):
    """Copy a numpy array into the tensor of scope variable ``name``.

    Looks the variable up in the module-level ``scope``, resizes its
    tensor to ``data.shape``, allocates storage of the matching dtype
    on the module-level ``place``, and copies ``data`` in.

    Args:
        name: name of a variable already present in ``scope``.
        data: ``numpy.ndarray`` with dtype int32 or float32.

    Raises:
        ValueError: if ``data`` has any other dtype.
    """
    assert isinstance(data, numpy.ndarray)
    tensor = scope.find_var(name).get_tensor()
    tensor.set_dims(data.shape)
    # Allocate storage that matches the array dtype.  NOTE: the two
    # branches were swapped before (int32 data got a float buffer and
    # float32 data got an int buffer); fixed here.
    if data.dtype == numpy.dtype('int32'):
        tensor.alloc_int(place)
    elif data.dtype == numpy.dtype('float32'):
        tensor.alloc_float(place)
    else:
        raise ValueError("data type not supported")
    tensor.set(data, place)
...
...
@@ -49,7 +54,11 @@ def grad_var_name(var_name):
def sgd_optimizer(net, param_name, learning_rate=0.01):
    """Append a plain SGD update op for ``param_name`` to ``net``.

    The op reads the parameter and its gradient variable (named via
    ``grad_var_name``) and writes the updated parameter back in place
    (``param_out`` is the parameter itself).
    """
    gradient = grad_var_name(param_name)
    update = Operator(
        "sgd",
        param=param_name,
        grad=gradient,
        param_out=param_name,
        learning_rate=learning_rate)
    net.add_op(update)
...
...
@@ -65,7 +74,7 @@ def init_param(param_name, dims):
# fc_layer
def
fc_layer
(
net
,
input
,
size
,
act
=
"s
igmoid
"
,
bias
=
True
,
param
=
None
,
name
=
None
):
def
fc_layer
(
net
,
input
,
size
,
act
=
"s
oftmax
"
,
bias
=
True
,
param
=
None
,
name
=
None
):
"""
Add a fc layer to net
...
...
@@ -125,16 +134,64 @@ def cross_entropy_layer(net, input, label):
return
cost_name
def get_backward_net(forward_net):
    """Derive the backward network of ``forward_net``.

    Every input and output variable the backward net touches is
    created in the module-level ``scope`` (with an empty tensor) so
    the net can be run immediately.

    Returns:
        The backward net built by ``core.Operator.backward``.
    """
    backward = core.Operator.backward(forward_net, set())
    for var_name in backward.inputs()["all"]:
        scope.new_var(var_name).get_tensor()
    for var_name in backward.outputs()["all"]:
        scope.new_var(var_name).get_tensor()
    return backward
def print_inputs_outputs(op):
    """Debug helper: print each input/output variable name of ``op``
    together with its current tensor dims from the module-level scope.

    The original mixed py2 ``print a, b`` statements with ``print(...)``
    calls; normalized here to ``print("%s %s" % ...)``, which produces
    the same output under py2 and is also valid py3.
    """
    print("===============" + op.type() + "==============")
    print("***inputs:***")
    for in_name in op.inputs()["all"]:
        print("%s %s" %
              (in_name, scope.find_var(in_name).get_tensor().get_dims()))
    print("***outputs:***")
    for out_name in op.outputs()["all"]:
        print("%s %s" %
              (out_name, scope.find_var(out_name).get_tensor().get_dims()))
    print("")
    print("")
# ---- network definition (forward_network / optimize_net / dev_ctx are
#      created earlier in this file) ----
images = data_layer(name='pixel', dims=[BATCH_SIZE, 784])
label = data_layer(name='label', dims=[BATCH_SIZE])
fc = fc_layer(net=forward_network, input=images, size=10, act="softmax")
cost = cross_entropy_layer(net=forward_network, input=fc, label=label)

forward_network.complete_add_op(True)
print(forward_network)
backward_net = get_backward_net(forward_network)
print(backward_net)
optimize_net.complete_add_op(True)
print(optimize_net)

PASS_NUM = 10
for pass_id in range(PASS_NUM):
    print(pass_id)

    print("===========forward==========")
    feed_data("pixel",
              numpy.random.random((BATCH_SIZE, 784)).astype('float32'))
    feed_data("label", numpy.ones(BATCH_SIZE).astype("int32"))
    forward_network.infer_shape(scope)
    print_inputs_outputs(forward_network)
    print(numpy.array(scope.find_var("label").get_tensor()))
    forward_network.run(scope, dev_ctx)
    # print(numpy.array(scope.find_var("fc_0").get_tensor()))

    print("===========backward==========")
    # Seed the cost gradient with the cost values themselves — a debug
    # shortcut, not a real loss gradient.
    cost_data = numpy.array(scope.find_var("cross_entropy_1").get_tensor())
    cost_grad = scope.find_var(grad_var_name("cross_entropy_1")).get_tensor()
    cost_grad.set_dims(cost_data.shape)
    cost_grad.alloc_float(place)
    cost_grad.set(cost_data, place)

    backward_net.infer_shape(scope)
    print_inputs_outputs(backward_net)
    backward_net.run(scope, dev_ctx)

    print("===========optimize_net==========")
    print_inputs_outputs(optimize_net)
    optimize_net.run(scope, dev_ctx)
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录