PaddlePaddle/Paddle · commit 51d4afaa (unverified)
Authored on Nov 04, 2017 by Yu Yang; committed via GitHub on Nov 04, 2017.

Rename program->main_program, init_program->startup_program (#5360)

Parent: e745bcfc
Showing 24 changed files with 486 additions and 418 deletions (+486, -418).
Files changed (24):
- python/paddle/v2/framework/framework.py  (+2, -2)
- python/paddle/v2/framework/io.py  (+35, -29)
- python/paddle/v2/framework/layer_helper.py  (+15, -15)
- python/paddle/v2/framework/layers.py  (+30, -29)
- python/paddle/v2/framework/net_drawer.py  (+3, -3)
- python/paddle/v2/framework/nets.py  (+22, -22)
- python/paddle/v2/framework/optimizer.py  (+7, -5)
- python/paddle/v2/framework/tests/test_executor_and_mul.py  (+2, -2)
- python/paddle/v2/framework/tests/test_fit_a_line.py  (+20, -16)
- python/paddle/v2/framework/tests/test_image_classification_layer.py  (+35, -31)
- python/paddle/v2/framework/tests/test_image_classification_train.py  (+66, -50)
- python/paddle/v2/framework/tests/test_inference_model_io.py  (+10, -10)
- python/paddle/v2/framework/tests/test_layers.py  (+53, -36)
- python/paddle/v2/framework/tests/test_lod_rank_table.py  (+2, -2)
- python/paddle/v2/framework/tests/test_operator_desc.py  (+2, -2)
- python/paddle/v2/framework/tests/test_parameter.py  (+2, -2)
- python/paddle/v2/framework/tests/test_program.py  (+9, -9)
- python/paddle/v2/framework/tests/test_recognize_digits_conv.py  (+25, -19)
- python/paddle/v2/framework/tests/test_recognize_digits_mlp.py  (+25, -18)
- python/paddle/v2/framework/tests/test_recommender_system.py  (+66, -64)
- python/paddle/v2/framework/tests/test_recurrent_op.py  (+16, -14)
- python/paddle/v2/framework/tests/test_understand_sentiment_conv.py  (+3, -3)
- python/paddle/v2/framework/tests/test_variable.py  (+2, -2)
- python/paddle/v2/framework/tests/test_word2vec.py  (+34, -33)
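The change is a mechanical rename of the two Program keyword arguments threaded through layers, nets, io, and the optimizer: program= becomes main_program=, init_program= becomes startup_program=, and the module-level defaults g_program/g_init_program become g_main_program/g_startup_program. A minimal before/after sketch of a call site, condensed from the updated test scripts in this commit (only the arguments touched by the rename are shown):

import paddle.v2.framework.layers as layers
from paddle.v2.framework.framework import Program

# Before this commit:
#   x = layers.data(name='x', shape=[13], data_type='float32',
#                   program=program, init_program=init_program)

main_program = Program()     # graph executed every training iteration
startup_program = Program()  # parameter-initialization ops, run once
x = layers.data(
    name='x',
    shape=[13],
    data_type='float32',
    main_program=main_program,
    startup_program=startup_program)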
python/paddle/v2/framework/framework.py
@@ -550,5 +550,5 @@ class Parameter(Variable):
 # program is a global instance.
-g_program = Program()
-g_init_program = Program()
+g_main_program = Program()
+g_startup_program = Program()
python/paddle/v2/framework/io.py
 import os
 import cPickle as pickle
-from paddle.v2.framework.framework import Program, Parameter, g_program, \
+from paddle.v2.framework.framework import Program, Parameter, g_main_program, \
     Variable
 __all__ = [
@@ -29,13 +29,13 @@ def _clone_var_in_block_(block, var):
         persistable=True)
-def save_vars(executor, dirname, program=None, vars=None, predicate=None):
+def save_vars(executor, dirname, main_program=None, vars=None, predicate=None):
     """
     Save variables to directory by executor.
     :param executor: executor that save variable
     :param dirname: directory path
-    :param program: program. If vars is None, then filter all variables in this
+    :param main_program: program. If vars is None, then filter all variables in this
     program which fit `predicate`. Default g_program.
     :param predicate: The Predicate describes a callable that returns a variable
     as a bool. If it returns true, the variables will be saved.
@@ -44,15 +44,15 @@ def save_vars(executor, dirname, program=None, vars=None, predicate=None):
     :return: None
     """
     if vars is None:
-        if program is None:
-            program = g_program
-        if not isinstance(program, Program):
+        if main_program is None:
+            main_program = g_main_program
+        if not isinstance(main_program, Program):
             raise TypeError("program should be as Program type or None")
         save_vars(
             executor,
             dirname=dirname,
-            vars=filter(predicate, program.list_vars()))
+            vars=filter(predicate, main_program.list_vars()))
     else:
         save_program = Program()
         save_block = save_program.global_block()
@@ -66,37 +66,37 @@ def save_vars(executor, dirname, program=None, vars=None, predicate=None):
     executor.run(save_program)
-def save_params(executor, dirname, program=None):
+def save_params(executor, dirname, main_program=None):
     """
     Save all parameters to directory with executor.
     """
     save_vars(
         executor,
         dirname=dirname,
-        program=program,
+        main_program=main_program,
         vars=None,
         predicate=is_parameter)
-def save_persistables(executor, dirname, program=None):
+def save_persistables(executor, dirname, main_program=None):
     """
     Save all persistables to directory with executor.
     """
     save_vars(
         executor,
         dirname=dirname,
-        program=program,
+        main_program=main_program,
         vars=None,
         predicate=is_persistable)
-def load_vars(executor, dirname, program=None, vars=None, predicate=None):
+def load_vars(executor, dirname, main_program=None, vars=None, predicate=None):
     """
     Load variables from directory by executor.
     :param executor: executor that save variable
     :param dirname: directory path
-    :param program: program. If vars is None, then filter all variables in this
+    :param main_program: program. If vars is None, then filter all variables in this
     program which fit `predicate`. Default g_program.
     :param predicate: The Predicate describes a callable that returns a variable
     as a bool. If it returns true, the variables will be loaded.
@@ -105,15 +105,15 @@ def load_vars(executor, dirname, program=None, vars=None, predicate=None):
     :return: None
     """
     if vars is None:
-        if program is None:
-            program = g_program
-        if not isinstance(program, Program):
+        if main_program is None:
+            main_program = g_main_program
+        if not isinstance(main_program, Program):
             raise TypeError("program's type should be Program")
         load_vars(
             executor,
             dirname=dirname,
-            vars=filter(predicate, program.list_vars()))
+            vars=filter(predicate, main_program.list_vars()))
     else:
         load_prog = Program()
         load_block = load_prog.global_block()
@@ -129,27 +129,33 @@ def load_vars(executor, dirname, program=None, vars=None, predicate=None):
     executor.run(load_prog)
-def load_params(executor, dirname, program=None):
+def load_params(executor, dirname, main_program=None):
     """
     load all parameters from directory by executor.
     """
     load_vars(
-        executor, dirname=dirname, program=program, predicate=is_parameter)
+        executor,
+        dirname=dirname,
+        main_program=main_program,
+        predicate=is_parameter)
-def load_persistables(executor, dirname, program=None):
+def load_persistables(executor, dirname, main_program=None):
     """
     load all persistables from directory by executor.
     """
     load_vars(
-        executor, dirname=dirname, program=program, predicate=is_persistable)
+        executor,
+        dirname=dirname,
+        main_program=main_program,
+        predicate=is_persistable)
 def save_inference_model(dirname,
                          feeded_var_names,
                          target_vars,
                          executor,
-                         program=None):
+                         main_program=None):
     """
     Build a model especially for inference,
     and save it to directory by the executor.
@@ -158,20 +164,20 @@ def save_inference_model(dirname,
     :param feeded_var_names: Names of variables that need to be feeded data during inference
     :param target_vars: Variables from which we can get inference results.
     :param executor: executor that save inference model
-    :param program: original program, which will be pruned to build the inference model.
+    :param main_program: original program, which will be pruned to build the inference model.
     Default g_program.
     :return: None
     """
-    if program is None:
-        program = g_program
+    if main_program is None:
+        main_program = g_main_program
     if not isinstance(target_vars, list):
         target_vars = [target_vars]
     if not os.path.isdir(dirname):
         os.makedirs(dirname)
-    pruned_program = program.prune(target_vars)
+    pruned_program = main_program.prune(target_vars)
     fetch_var_names = [v.name for v in target_vars]
     model_file_name = dirname + "/__model__"
@@ -182,10 +188,10 @@ def save_inference_model(dirname,
             "fetch_var_names": fetch_var_names}, f, -1)
-    save_params(executor, dirname, program)
+    save_params(executor, dirname, main_program)
-def load_persistables_if_exist(executor, dirname, program=None):
+def load_persistables_if_exist(executor, dirname, main_program=None):
     filenames = next(os.walk(dirname))[2]
     filenames = set(filenames)
@@ -198,7 +204,7 @@ def load_persistables_if_exist(executor, dirname, program=None):
     load_vars(
         executor,
         dirname,
-        program=program,
+        main_program=main_program,
         vars=None,
         predicate=_is_presistable_and_exist_)
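With the renamed keyword, the persistence helpers in io.py are called with main_program= rather than program=, and fall back to the global g_main_program when the argument is omitted. A short usage sketch mirroring the call sites in test_fit_a_line.py later in this diff (exe is an Executor and main_program a Program set up as in that script; the directory name is the one used there):

from paddle.v2.framework.io import save_persistables, load_persistables

# Persist and restore every persistable variable of the given program.
save_persistables(exe, "./fit_a_line.model/", main_program=main_program)
load_persistables(exe, "./fit_a_line.model/", main_program=main_program)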
python/paddle/v2/framework/layer_helper.py
 import copy
 import itertools
-from paddle.v2.framework.framework import Variable, g_program, \
-    g_init_program, unique_name, Program
+from paddle.v2.framework.framework import Variable, g_main_program, \
+    g_startup_program, unique_name, Program
 from paddle.v2.framework.initializer import ConstantInitializer, \
     UniformInitializer
@@ -20,23 +20,23 @@ class LayerHelper(object):
         return self.kwargs['name']
     @property
-    def program(self):
-        prog = self.kwargs.get('program', None)
+    def main_program(self):
+        prog = self.kwargs.get('main_program', None)
         if prog is None:
-            return g_program
+            return g_main_program
         else:
             return prog
     @property
-    def init_program(self):
-        prog = self.kwargs.get('init_program', None)
+    def startup_program(self):
+        prog = self.kwargs.get('startup_program', None)
         if prog is None:
-            return g_init_program
+            return g_startup_program
         else:
             return prog
     def append_op(self, *args, **kwargs):
-        return self.program.current_block().append_op(*args, **kwargs)
+        return self.main_program.current_block().append_op(*args, **kwargs)
     def multiple_input(self, input_param_name='input'):
         inputs = self.kwargs.get(input_param_name, [])
@@ -120,27 +120,27 @@ class LayerHelper(object):
             attr_copy['initializer'] = initializer
         if attr_copy['name'] is None:
             attr_copy['name'] = unique_name(".".join([self.name, suffix]))
-        self.init_program.global_block().create_parameter(
+        self.startup_program.global_block().create_parameter(
             dtype=dtype, shape=shape, **attr_copy)
-        return self.program.global_block().create_parameter(
+        return self.main_program.global_block().create_parameter(
             name=attr_copy['name'], dtype=dtype, shape=shape)
     def create_tmp_variable(self, dtype):
-        return self.program.current_block().create_var(
+        return self.main_program.current_block().create_var(
            name=unique_name(".".join([self.name, 'tmp'])),
            dtype=dtype,
            persistable=False)
     def create_variable(self, *args, **kwargs):
-        return self.program.current_block().create_var(*args, **kwargs)
+        return self.main_program.current_block().create_var(*args, **kwargs)
     def create_global_variable(self, persistable=False, *args, **kwargs):
-        return self.program.global_block().create_var(
+        return self.main_program.global_block().create_var(
             *args, persistable=persistable, **kwargs)
     def set_variable_initializer(self, var, initializer):
         assert isinstance(var, Variable)
-        self.init_program.global_block().create_var(
+        self.startup_program.global_block().create_var(
             name=var.name,
             type=var.type,
             dtype=var.data_type,
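Layer functions keep constructing their helper with LayerHelper(<type>, **locals()), so the renamed keywords reach the helper as plain kwargs and the two properties above resolve them, defaulting to the globals when they are None. A condensed, self-contained sketch of that lookup (the class name is illustrative; the authoritative logic is the LayerHelper diff above):

from paddle.v2.framework.framework import g_main_program, g_startup_program

class _HelperSketch(object):
    # Illustrative reduction of LayerHelper's program lookup.
    def __init__(self, layer_type, **kwargs):
        self.kwargs = kwargs  # receives the calling layer's **locals()

    @property
    def main_program(self):
        prog = self.kwargs.get('main_program', None)
        return g_main_program if prog is None else prog

    @property
    def startup_program(self):
        prog = self.kwargs.get('startup_program', None)
        return g_startup_program if prog is None else prog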
python/paddle/v2/framework/layers.py
@@ -18,8 +18,8 @@ def fc(input,
        name=None,
        act=None,
        num_flatten_dims=1,
-       program=None,
-       init_program=None):
+       main_program=None,
+       startup_program=None):
     # create helper
     helper = LayerHelper('fc', **locals())
@@ -64,8 +64,8 @@ def embedding(input,
               data_type='float32',
               is_sparse=False,
               param_attr=None,
-              program=None,
-              init_program=None):
+              main_program=None,
+              startup_program=None):
     helper = LayerHelper('embedding', **locals())
     w = helper.create_parameter(
         attr=helper.param_attr, shape=size, dtype=data_type)
@@ -84,8 +84,8 @@ def data(name,
          data_type='float32',
          type=core.VarDesc.VarType.LOD_TENSOR,
          append_batch_size=True,
-         program=None,
-         init_program=None):
+         main_program=None,
+         startup_program=None):
     helper = LayerHelper('data', **locals())
     shape = list(shape)
     for i in xrange(len(shape)):
@@ -178,7 +178,7 @@ _create_op_func_('sigmoid')
 _create_op_func_('scale')
-def cast(x, data_type, program=None):
+def cast(x, data_type, main_program=None):
     helper = LayerHelper('cast', **locals())
     out = helper.create_tmp_variable(dtype=data_type)
     helper.append_op(
@@ -190,7 +190,7 @@ def cast(x, data_type, program=None):
     return out
-def concat(input, axis, program=None, init_program=None):
+def concat(input, axis, main_program=None, startup_program=None):
     helper = LayerHelper('concat', **locals())
     out = helper.create_tmp_variable(dtype=helper.input_dtype())
     helper.append_op(
@@ -201,7 +201,7 @@ def concat(input, axis, program=None, init_program=None):
     return out
-def sums(input, program=None, init_program=None):
+def sums(input, main_program=None, startup_program=None):
     helper = LayerHelper('sum', **locals())
     out = helper.create_tmp_variable(dtype=helper.input_dtype())
     helper.append_op(type='sum', inputs={'X': input}, outputs={'Out': out})
@@ -281,8 +281,8 @@ def sequence_conv(input,
                   padding=None,
                   bias_attr=None,
                   param_attr=None,
-                  program=None,
-                  init_program=None):
+                  main_program=None,
+                  startup_program=None):
     # FIXME(dzh) : want to unify the argument of python layer
     # function. So we ignore some unecessary attributes.
     # such as, padding_trainable, context_start.
@@ -321,8 +321,8 @@ def conv2d(input,
            padding=None,
            bias_attr=None,
            param_attr=None,
-           program=None,
-           init_program=None):
+           main_program=None,
+           startup_program=None):
     helper = LayerHelper('conv2d', **locals())
     dtype = helper.input_dtype()
@@ -388,8 +388,8 @@ def pool2d(input,
            pool_stride=[1, 1],
            pool_padding=[0, 0],
            global_pooling=False,
-           program=None,
-           init_program=None):
+           main_program=None,
+           startup_program=None):
     if pool_type not in ["max", "avg"]:
         raise ValueError("Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
@@ -428,8 +428,8 @@ def batch_norm(input,
                param_attr=None,
                bias_attr=None,
                data_layout='NCHW',
-               program=None,
-               init_program=None):
+               main_program=None,
+               startup_program=None):
     helper = LayerHelper('batch_norm', **locals())
     dtype = helper.input_dtype()
@@ -505,16 +505,16 @@ class BlockGuard(object):
     keyword.
     """
-    def __init__(self, program):
-        if not isinstance(program, Program):
+    def __init__(self, main_program):
+        if not isinstance(main_program, Program):
             raise TypeError("BlockGuard takes a program")
-        self.program = program
+        self.main_program = main_program
     def __enter__(self):
-        self.program.create_block()
+        self.main_program.create_block()
     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.program.rollback()
+        self.main_program.rollback()
         if exc_type is not None:
             return False  # re-raise exception
         return True
@@ -524,7 +524,7 @@ class StaticRNNGuard(BlockGuard):
     def __init__(self, rnn):
         if not isinstance(rnn, StaticRNN):
             raise TypeError("StaticRNNGuard takes an StaticRNN")
-        super(StaticRNNGuard, self).__init__(rnn.helper.program)
+        super(StaticRNNGuard, self).__init__(rnn.helper.main_program)
         self.rnn = rnn
     def __enter__(self):
@@ -560,8 +560,9 @@ class StaticRNN(object):
     IN_RNN_BLOCK = 1
     AFTER_RNN_BLOCK = 2
-    def __init__(self, name=None, program=None):
-        self.helper = LayerHelper("static_rnn", name=name, program=program)
+    def __init__(self, name=None, main_program=None):
+        self.helper = LayerHelper(
+            "static_rnn", name=name, main_program=main_program)
         self.memories = {}  # memory map, from pre_mem.name --> MemoryLink
         self.inputs = []  # input variable list in current block
         self.outputs = []  # output variable list in parent block
@@ -653,7 +654,7 @@ class StaticRNN(object):
             self.memories[mem.name].mem = var
     def parent_block(self):
-        prog = self.helper.program
+        prog = self.helper.main_program
         parent_idx = prog.current_block().parent_idx
         assert parent_idx >= 0
         parent_block = prog.block(parent_idx)
@@ -670,8 +671,8 @@ class StaticRNN(object):
             return self.outputs
     def complete_rnn_op(self):
-        program = self.helper.program
-        rnn_block = program.current_block()
+        main_program = self.helper.main_program
+        rnn_block = main_program.current_block()
         parent_block = self.parent_block()
         local_inputs = set()
@@ -737,7 +738,7 @@ class StaticRNN(object):
         })
-def lod_rank_table(x, level=0, program=None):
+def lod_rank_table(x, level=0, main_program=None):
     helper = LayerHelper("lod_rank_table", **locals())
     table = helper.create_variable(
         type=core.VarDesc.VarType.LOD_RANK_TABLE,
python/paddle/v2/framework/net_drawer.py
@@ -80,7 +80,7 @@ def parse_graph(program, graph, var_dict, **kwargs):
             graph.edge(**draw_edge(var_dict, op, e, arg))
-def draw_graph(init_program, program, **kwargs):
+def draw_graph(startup_program, main_program, **kwargs):
     if kwargs.has_key("graph_attr"):
         GRAPH_STYLE.update(kwargs[graph_attr])
     if kwargs.has_key("node_attr"):
@@ -101,8 +101,8 @@ def draw_graph(init_program, program, **kwargs):
         **kwargs)
     var_dict = {}
-    parse_graph(init_program, g, var_dict)
-    parse_graph(program, g, var_dict)
+    parse_graph(startup_program, g, var_dict)
+    parse_graph(main_program, g, var_dict)
     if filename != None:
         g.save()
python/paddle/v2/framework/nets.py
@@ -10,23 +10,23 @@ def simple_img_conv_pool(input,
                          pool_stride,
                          act,
                          pool_type='max',
-                         program=None,
-                         init_program=None):
+                         main_program=None,
+                         startup_program=None):
     conv_out = layers.conv2d(
         input=input,
         num_filters=num_filters,
         filter_size=filter_size,
         act=act,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
     pool_out = layers.pool2d(
         input=conv_out,
         pool_size=pool_size,
         pool_type=pool_type,
         pool_stride=pool_stride,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
     return pool_out
@@ -40,8 +40,8 @@ def img_conv_group(input,
                    conv_batchnorm_drop_rate=None,
                    pool_stride=1,
                    pool_type=None,
-                   program=None,
-                   init_program=None):
+                   main_program=None,
+                   startup_program=None):
     """
     Image Convolution Group, Used for vgg net.
     """
@@ -71,30 +71,30 @@ def img_conv_group(input,
             filter_size=conv_filter_size[i],
             padding=conv_padding[i],
             act=local_conv_act,
-            program=program,
-            init_program=init_program)
+            main_program=main_program,
+            startup_program=startup_program)
         if conv_with_batchnorm[i]:
             tmp = layers.batch_norm(
                 input=tmp,
                 act=conv_act,
-                program=program,
-                init_program=init_program)
+                main_program=main_program,
+                startup_program=startup_program)
             drop_rate = conv_batchnorm_drop_rate[i]
             if abs(drop_rate) > 1e-5:
                 tmp = layers.dropout(
                     x=tmp,
                     dropout_prob=drop_rate,
-                    program=program,
-                    init_program=init_program)
+                    main_program=main_program,
+                    startup_program=startup_program)
     pool_out = layers.pool2d(
         input=tmp,
         pool_size=pool_size,
         pool_type=pool_type,
         pool_stride=pool_stride,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
     return pool_out
@@ -103,19 +103,19 @@ def sequence_conv_pool(input,
                        filter_size,
                        act="sigmoid",
                        pool_type="max",
-                       program=None,
-                       init_program=None):
+                       main_program=None,
+                       startup_program=None):
     conv_out = layers.sequence_conv(
         input=input,
         num_filters=num_filters,
         filter_size=filter_size,
         act=act,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
     pool_out = layers.sequence_pool(
         input=conv_out,
         pool_type=pool_type,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
     return pool_out
python/paddle/v2/framework/optimizer.py
@@ -132,7 +132,7 @@ class Optimizer(object):
     def create_optimization_pass(self,
                                  parameters_and_grads,
                                  loss,
-                                 init_program=None):
+                                 startup_program=None):
         """Add optimization operators to update gradients to variables.
         Args:
@@ -144,7 +144,7 @@ class Optimizer(object):
         optimization. This will include parameter update ops, global step
         update ops and any other custom ops required by subclasses to manage
         their internal state.
-        :param init_program:
+        :param startup_program:
         """
         # This is a default implementation of create_optimization_pass that
        # can be shared by most optimizers. This implementation assumes that
@@ -156,7 +156,9 @@ class Optimizer(object):
         # Create any accumulators
         program = loss.block.program
         self.helper = LayerHelper(
-            self.__class__.__name__, program=program, init_program=init_program)
+            self.__class__.__name__,
+            main_program=program,
+            startup_program=startup_program)
         self._create_accumulators(loss.block,
                                   [p[0] for p in parameters_and_grads])
         # Create any necessary tensors
@@ -185,7 +187,7 @@ class Optimizer(object):
     def minimize(self,
                  loss,
-                 init_program=None,
+                 startup_program=None,
                  parameter_list=None,
                  no_grad_set=None):
         """Add operations to minimize `loss` by updating `parameter_list`.
@@ -198,7 +200,7 @@ class Optimizer(object):
         # Add regularization if any
         params_grads = append_regularization_ops(params_grads)
         optimize_ops = self.create_optimization_pass(params_grads, loss,
-                                                     init_program)
+                                                     startup_program)
         return optimize_ops
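On the optimizer side only the keyword for the initialization program changes: minimize and create_optimization_pass now take startup_program, while the training program is still read from loss.block.program. A typical training sequence after this commit, condensed from the test scripts below (avg_cost, main_program, and startup_program are assumed to be built as in those scripts):

import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer
from paddle.v2.framework.executor import Executor

sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
# Previously: sgd_optimizer.minimize(avg_cost, init_program)
opts = sgd_optimizer.minimize(avg_cost, startup_program)

exe = Executor(core.CPUPlace())
exe.run(startup_program, feed={}, fetch_list=[])  # initialize parameters once
# ...then run main_program repeatedly for training, e.g.:
# outs = exe.run(main_program, feed={...}, fetch_list=[avg_cost])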
python/paddle/v2/framework/tests/test_executor_and_mul.py
@@ -2,7 +2,7 @@ import unittest
 from paddle.v2.framework.layers import mul, data
 import paddle.v2.framework.core as core
 from paddle.v2.framework.executor import Executor
-from paddle.v2.framework.framework import g_program
+from paddle.v2.framework.framework import g_main_program
 import numpy
@@ -23,7 +23,7 @@ class TestExecutor(unittest.TestCase):
         tensor_b = core.LoDTensor()
         tensor_b.set(b_np, place)
         exe = Executor(place)
-        outs = exe.run(g_program,
+        outs = exe.run(g_main_program,
                        feed={'a': tensor_a, 'b': tensor_b},
                        fetch_list=[out])
python/paddle/v2/framework/tests/test_fit_a_line.py
@@ -3,40 +3,44 @@ import paddle.v2.framework.layers as layers
 import paddle.v2.framework.core as core
 import paddle.v2.framework.optimizer as optimizer
-from paddle.v2.framework.framework import Program, g_program
+from paddle.v2.framework.framework import Program, g_main_program
 from paddle.v2.framework.io import save_persistables, load_persistables
 from paddle.v2.framework.executor import Executor
 import numpy as np
-init_program = Program()
-program = Program()
+startup_program = Program()
+main_program = Program()
 x = layers.data(
     name='x',
     shape=[13],
     data_type='float32',
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 y_predict = layers.fc(input=x,
                       size=1,
                       act=None,
-                      program=program,
-                      init_program=init_program)
+                      main_program=main_program,
+                      startup_program=startup_program)
 y = layers.data(
     name='y',
     shape=[1],
     data_type='float32',
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 cost = layers.square_error_cost(
-    input=y_predict, label=y, program=program, init_program=init_program)
-avg_cost = layers.mean(x=cost, program=program, init_program=init_program)
+    input=y_predict,
+    label=y,
+    main_program=main_program,
+    startup_program=startup_program)
+avg_cost = layers.mean(
+    x=cost, main_program=main_program, startup_program=startup_program)
 sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
-opts = sgd_optimizer.minimize(avg_cost, init_program)
+opts = sgd_optimizer.minimize(avg_cost, startup_program)
 BATCH_SIZE = 20
@@ -48,12 +52,12 @@ train_reader = paddle.batch(
 place = core.CPUPlace()
 exe = Executor(place)
-exe.run(init_program, feed={}, fetch_list=[])
+exe.run(startup_program, feed={}, fetch_list=[])
 PASS_NUM = 100
 for pass_id in range(PASS_NUM):
-    save_persistables(exe, "./fit_a_line.model/", program=program)
-    load_persistables(exe, "./fit_a_line.model/", program=program)
+    save_persistables(exe, "./fit_a_line.model/", main_program=main_program)
+    load_persistables(exe, "./fit_a_line.model/", main_program=main_program)
     for data in train_reader():
         x_data = np.array(map(lambda x: x[0], data)).astype("float32")
         y_data = np.array(map(lambda x: x[1], data)).astype("float32")
@@ -65,7 +69,7 @@ for pass_id in range(PASS_NUM):
         tensor_y = core.LoDTensor()
         tensor_y.set(y_data, place)
         # print tensor_y.get_dims()
-        outs = exe.run(program,
+        outs = exe.run(main_program,
                        feed={'x': tensor_x, 'y': tensor_y},
                        fetch_list=[avg_cost])
python/paddle/v2/framework/tests/test_image_classification_layer.py
@@ -9,8 +9,8 @@ def conv_block(input,
                num_filter,
                groups,
                dropouts,
-               program=None,
-               init_program=None):
+               main_program=None,
+               startup_program=None):
     return nets.img_conv_group(
         input=input,
         pool_size=2,
@@ -21,77 +21,81 @@ def conv_block(input,
         conv_with_batchnorm=True,
         conv_batchnorm_drop_rate=dropouts,
         pool_type='max',
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
 class TestLayer(unittest.TestCase):
     def test_batch_norm_layer(self):
-        program = Program()
-        init_program = Program()
+        main_program = Program()
+        startup_program = Program()
         images = layers.data(
             name='pixel',
             shape=[3, 48, 48],
             data_type='float32',
-            program=program)
+            main_program=main_program)
         layers.batch_norm(
-            input=images, program=program, init_program=init_program)
-        # print str(program)
+            input=images,
+            main_program=main_program,
+            startup_program=startup_program)
+        # print str(main_program)
     def test_dropout_layer(self):
-        program = Program()
-        init_program = Program()
+        main_program = Program()
+        startup_program = Program()
         images = layers.data(
             name='pixel',
             shape=[3, 48, 48],
             data_type='float32',
-            program=program)
+            main_program=main_program)
         layers.dropout(
             x=images,
             dropout_prob=0.5,
-            program=program,
-            init_program=init_program)
-        # print str(program)
+            main_program=main_program,
+            startup_program=startup_program)
+        # print str(main_program)
     def test_img_conv_group(self):
-        program = Program()
-        init_program = Program()
-        images = layers.data(
-            name='pixel',
-            shape=[3, 48, 48],
-            data_type='float32',
-            program=program,
-            init_program=init_program)
-        conv1 = conv_block(images, 64, 2, [0.3, 0], program, init_program)
-        conv2 = conv_block(conv1, 256, 3, [0.4, 0.4, 0], program, init_program)
-        # print str(program)
+        main_program = Program()
+        startup_program = Program()
+        images = layers.data(
+            name='pixel',
+            shape=[3, 48, 48],
+            data_type='float32',
+            main_program=main_program,
+            startup_program=startup_program)
+        conv1 = conv_block(images, 64, 2, [0.3, 0], main_program,
+                           startup_program)
+        conv2 = conv_block(conv1, 256, 3, [0.4, 0.4, 0], main_program,
+                           startup_program)
+        # print str(main_program)
     def test_elementwise_add_with_act(self):
-        program = Program()
-        init_program = Program()
+        main_program = Program()
+        startup_program = Program()
         image1 = layers.data(
             name='pixel1',
             shape=[3, 48, 48],
             data_type='float32',
-            program=program,
-            init_program=init_program)
+            main_program=main_program,
+            startup_program=startup_program)
         image2 = layers.data(
             name='pixel2',
             shape=[3, 48, 48],
             data_type='float32',
-            program=program,
-            init_program=init_program)
+            main_program=main_program,
+            startup_program=startup_program)
         out = layers.elementwise_add(
             x=image1,
             y=image2,
             act='relu',
-            program=program,
-            init_program=init_program)
-        # print(program)
+            main_program=main_program,
+            startup_program=startup_program)
+        # print(main_program)
 if __name__ == '__main__':
python/paddle/v2/framework/tests/test_image_classification_train.py
@@ -5,19 +5,19 @@ import paddle.v2.framework.layers as layers
 import paddle.v2.framework.nets as nets
 import paddle.v2.framework.optimizer as optimizer
 from paddle.v2.framework.executor import Executor
-from paddle.v2.framework.framework import g_init_program, g_program
+from paddle.v2.framework.framework import g_startup_program, g_main_program
 from paddle.v2.framework.initializer import XavierInitializer
-def resnet_cifar10(input, depth=32, program=None, init_program=None):
+def resnet_cifar10(input, depth=32, main_program=None, startup_program=None):
     def conv_bn_layer(input,
                       ch_out,
                       filter_size,
                       stride,
                       padding,
                       act='relu',
-                      program=None,
-                      init_program=None):
+                      main_program=None,
+                      startup_program=None):
         tmp = layers.conv2d(
             input=input,
             filter_size=filter_size,
@@ -26,10 +26,13 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None):
             padding=padding,
             act=None,
             bias_attr=False,
-            program=program,
-            init_program=init_program)
+            main_program=main_program,
+            startup_program=startup_program)
         return layers.batch_norm(
-            input=tmp, act=act, program=program, init_program=init_program)
+            input=tmp,
+            act=act,
+            main_program=main_program,
+            startup_program=startup_program)
     def shortcut(input, ch_in, ch_out, stride, program, init_program):
         if ch_in != ch_out:
@@ -42,16 +45,16 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None):
                    ch_in,
                    ch_out,
                    stride,
-                   program=program,
-                   init_program=init_program):
+                   main_program=main_program,
+                   startup_program=startup_program):
         tmp = conv_bn_layer(
             input,
             ch_out,
             3,
             stride,
             1,
-            program=program,
-            init_program=init_program)
+            main_program=main_program,
+            startup_program=startup_program)
         tmp = conv_bn_layer(
             tmp,
             ch_out,
@@ -59,21 +62,22 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None):
             1,
             1,
             act=None,
-            program=program,
-            init_program=init_program)
-        short = shortcut(input, ch_in, ch_out, stride, program, init_program)
+            main_program=main_program,
+            startup_program=startup_program)
+        short = shortcut(input, ch_in, ch_out, stride, main_program,
+                         startup_program)
         return layers.elementwise_add(
             x=tmp,
             y=short,
             act='relu',
-            program=program,
-            init_program=init_program)
+            main_program=main_program,
+            startup_program=startup_program)
     def layer_warp(block_func, input, ch_in, ch_out, count, stride, program,
-                   init_program):
-        tmp = block_func(input, ch_in, ch_out, stride, program, init_program)
+                   startup_program):
+        tmp = block_func(input, ch_in, ch_out, stride, program, startup_program)
         for i in range(1, count):
-            tmp = block_func(tmp, ch_out, ch_out, 1, program, init_program)
+            tmp = block_func(tmp, ch_out, ch_out, 1, program, startup_program)
         return tmp
     assert (depth - 2) % 6 == 0
@@ -84,8 +88,8 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None):
         filter_size=3,
         stride=1,
         padding=1,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
     res1 = layer_warp(
         basicblock,
         conv1,
@@ -93,8 +97,8 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None):
         16,
         n,
         1,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
     res2 = layer_warp(
         basicblock,
         res1,
@@ -102,8 +106,8 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None):
         32,
         n,
         2,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
     res3 = layer_warp(
         basicblock,
         res2,
@@ -111,25 +115,25 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None):
         64,
         n,
         2,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
     pool = layers.pool2d(
         input=res3,
         pool_size=8,
         pool_type='avg',
         pool_stride=1,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
     return pool
-def vgg16_bn_drop(input, program=None, init_program=None):
+def vgg16_bn_drop(input, main_program=None, startup_program=None):
     def conv_block(input,
                    num_filter,
                    groups,
                    dropouts,
-                   program=None,
-                   init_program=None):
+                   main_program=None,
+                   startup_program=None):
         return nets.img_conv_group(
             input=input,
             pool_size=2,
@@ -140,38 +144,50 @@ def vgg16_bn_drop(input, program=None, init_program=None):
             conv_with_batchnorm=True,
             conv_batchnorm_drop_rate=dropouts,
             pool_type='max',
-            program=program,
-            init_program=init_program)
+            main_program=main_program,
+            startup_program=startup_program)
-    conv1 = conv_block(input, 64, 2, [0.3, 0], program, init_program)
-    conv2 = conv_block(conv1, 128, 2, [0.4, 0], program, init_program)
-    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0], program, init_program)
-    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0], program, init_program)
-    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0], program, init_program)
+    conv1 = conv_block(input, 64, 2, [0.3, 0], main_program, startup_program)
+    conv2 = conv_block(conv1, 128, 2, [0.4, 0], main_program, startup_program)
+    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0], main_program, startup_program)
+    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0], main_program, startup_program)
+    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0], main_program, startup_program)
     drop = layers.dropout(
-        x=conv5, dropout_prob=0.5, program=program, init_program=init_program)
+        x=conv5,
+        dropout_prob=0.5,
+        main_program=main_program,
+        startup_program=startup_program)
     fc1 = layers.fc(input=drop,
                     size=512,
                     act=None,
                     param_attr={"initializer": XavierInitializer()},
-                    program=program,
-                    init_program=init_program)
+                    main_program=main_program,
+                    startup_program=startup_program)
     reshape1 = layers.reshape(
         x=fc1,
         shape=list(fc1.shape + (1, 1)),
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)
     bn = layers.batch_norm(
-        input=reshape1, act='relu', program=program, init_program=init_program)
+        input=reshape1,
+        act='relu',
+        main_program=main_program,
+        startup_program=startup_program)
     drop2 = layers.dropout(
-        x=bn, dropout_prob=0.5, program=program, init_program=init_program)
+        x=bn,
+        dropout_prob=0.5,
+        main_program=main_program,
+        startup_program=startup_program)
     fc2 = layers.fc(input=drop2,
                     size=512,
                     act=None,
                     param_attr={"initializer": XavierInitializer()},
-                    program=program,
-                    init_program=init_program)
+                    main_program=main_program,
+                    startup_program=startup_program)
     return fc2
@@ -209,7 +225,7 @@ train_reader = paddle.batch(
 place = core.CPUPlace()
 exe = Executor(place)
-exe.run(g_init_program, feed={}, fetch_list=[])
+exe.run(g_startup_program, feed={}, fetch_list=[])
 for pass_id in range(PASS_NUM):
     batch_id = 0
@@ -227,7 +243,7 @@ for pass_id in range(PASS_NUM):
         tensor_img.set(img_data, place)
         tensor_y.set(y_data, place)
-        outs = exe.run(g_program,
+        outs = exe.run(g_main_program,
                        feed={"pixel": tensor_img, "label": tensor_y},
                        fetch_list=[avg_cost, accuracy])
python/paddle/v2/framework/tests/test_inference_model_io.py
@@ -3,7 +3,7 @@ import paddle.v2.framework.layers as layers
 import paddle.v2.framework.core as core
 import paddle.v2.framework.optimizer as optimizer
-from paddle.v2.framework.framework import Program, g_program
+from paddle.v2.framework.framework import Program, g_main_program
 from paddle.v2.framework.io import save_inference_model, load_inference_model
 import paddle.v2.framework.executor as executor
 import unittest
@@ -20,28 +20,28 @@ class TestBook(unittest.TestCase):
             name='x',
             shape=[2],
             data_type='float32',
-            program=program,
-            init_program=init_program)
+            main_program=program,
+            startup_program=init_program)
         y = layers.data(
             name='y',
             shape=[1],
             data_type='float32',
-            program=program,
-            init_program=init_program)
+            main_program=program,
+            startup_program=init_program)
         y_predict = layers.fc(input=x,
                               size=1,
                               act=None,
-                              program=program,
-                              init_program=init_program)
+                              main_program=program,
+                              startup_program=init_program)
         cost = layers.square_error_cost(
             input=y_predict,
             label=y,
-            program=program,
-            init_program=init_program)
+            main_program=program,
+            startup_program=init_program)
         avg_cost = layers.mean(
-            x=cost, program=program, init_program=init_program)
+            x=cost, main_program=program, startup_program=init_program)
         sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
         opts = sgd_optimizer.minimize(avg_cost, init_program)
python/paddle/v2/framework/tests/test_layers.py
 import paddle.v2.framework.layers as layers
 import paddle.v2.framework.nets as nets
-from paddle.v2.framework.framework import Program, g_program
+from paddle.v2.framework.framework import Program, g_main_program
 import paddle.v2.framework.core as core
 import unittest
@@ -9,15 +9,15 @@ class TestBook(unittest.TestCase):
     def test_fit_a_line(self):
         program = Program()
         x = layers.data(
-            name='x', shape=[13], data_type='float32', program=program)
-        y_predict = layers.fc(input=x, size=1, act=None, program=program)
+            name='x', shape=[13], data_type='float32', main_program=program)
+        y_predict = layers.fc(input=x, size=1, act=None, main_program=program)
         y = layers.data(
-            name='y', shape=[1], data_type='float32', program=program)
-        cost = layers.square_error_cost(
-            input=y_predict, label=y, program=program)
-        avg_cost = layers.mean(x=cost, program=program)
+            name='y', shape=[1], data_type='float32', main_program=program)
+        cost = layers.square_error_cost(
+            input=y_predict, label=y, main_program=program)
+        avg_cost = layers.mean(x=cost, main_program=program)
         self.assertIsNotNone(avg_cost)
         program.append_backward(avg_cost)
         print str(program)
@@ -27,26 +27,42 @@ class TestBook(unittest.TestCase):
         # Change g_program, so the rest layers use `g_program`
         images = layers.data(
-            name='pixel', shape=[784], data_type='float32', program=program)
+            name='pixel', shape=[784], data_type='float32', main_program=program)
         label = layers.data(
-            name='label', shape=[1], data_type='int32', program=program)
-        hidden1 = layers.fc(input=images, size=128, act='relu', program=program)
-        hidden2 = layers.fc(input=hidden1, size=64, act='relu', program=program)
+            name='label', shape=[1], data_type='int32', main_program=program)
+        hidden1 = layers.fc(input=images, size=128, act='relu', main_program=program)
+        hidden2 = layers.fc(input=hidden1, size=64, act='relu', main_program=program)
         predict = layers.fc(input=hidden2,
                             size=10,
                             act='softmax',
-                            program=program)
-        cost = layers.cross_entropy(input=predict, label=label, program=program)
-        avg_cost = layers.mean(x=cost, program=program)
+                            main_program=program)
+        cost = layers.cross_entropy(input=predict, label=label, main_program=program)
+        avg_cost = layers.mean(x=cost, main_program=program)
         self.assertIsNotNone(avg_cost)
         print str(program)
     def test_simple_conv2d(self):
         program = Program()
         images = layers.data(
-            name='pixel', shape=[3, 48, 48], data_type='int32', program=program)
-        layers.conv2d(
-            input=images, num_filters=3, filter_size=[4, 4], program=program)
+            name='pixel', shape=[3, 48, 48], data_type='int32', main_program=program)
+        layers.conv2d(
+            input=images, num_filters=3, filter_size=[4, 4], main_program=program)
         print str(program)
@@ -57,9 +73,9 @@ class TestBook(unittest.TestCase):
             name='pixel',
             shape=[1, 28, 28],
             data_type='float32',
-            program=program)
+            main_program=program)
         label = layers.data(
-            name='label', shape=[1], data_type='int32', program=program)
+            name='label', shape=[1], data_type='int32', main_program=program)
         conv_pool_1 = nets.simple_img_conv_pool(
             input=images,
             filter_size=5,
@@ -67,7 +83,7 @@ class TestBook(unittest.TestCase):
             pool_size=2,
             pool_stride=2,
             act="relu",
-            program=program)
+            main_program=program)
         conv_pool_2 = nets.simple_img_conv_pool(
             input=conv_pool_1,
             filter_size=5,
@@ -75,14 +91,15 @@ class TestBook(unittest.TestCase):
             pool_size=2,
             pool_stride=2,
             act="relu",
-            program=program)
+            main_program=program)
         predict = layers.fc(input=conv_pool_2,
                             size=10,
                             act="softmax",
-                            program=program)
-        cost = layers.cross_entropy(input=predict, label=label, program=program)
-        avg_cost = layers.mean(x=cost, program=program)
+                            main_program=program)
+        cost = layers.cross_entropy(input=predict, label=label, main_program=program)
+        avg_cost = layers.mean(x=cost, main_program=program)
         program.append_backward(avg_cost)
@@ -93,58 +110,58 @@ class TestBook(unittest.TestCase):
         dict_size = 10000
         embed_size = 32
         first_word = layers.data(
-            name='firstw', shape=[1], data_type='int64', program=program)
+            name='firstw', shape=[1], data_type='int64', main_program=program)
         second_word = layers.data(
-            name='secondw', shape=[1], data_type='int64', program=program)
+            name='secondw', shape=[1], data_type='int64', main_program=program)
         third_word = layers.data(
-            name='thirdw', shape=[1], data_type='int64', program=program)
+            name='thirdw', shape=[1], data_type='int64', main_program=program)
         forth_word = layers.data(
-            name='forthw', shape=[1], data_type='int64', program=program)
+            name='forthw', shape=[1], data_type='int64', main_program=program)
         next_word = layers.data(
-            name='nextw', shape=[1], data_type='int64', program=program)
+            name='nextw', shape=[1], data_type='int64', main_program=program)
         embed_first = layers.embedding(
             input=first_word,
             size=[dict_size, embed_size],
             data_type='float32',
             param_attr={'name': 'shared_w'},
-            program=program)
+            main_program=program)
         embed_second = layers.embedding(
             input=second_word,
             size=[dict_size, embed_size],
             data_type='float32',
             param_attr={'name': 'shared_w'},
-            program=program)
+            main_program=program)
         embed_third = layers.embedding(
             input=third_word,
             size=[dict_size, embed_size],
             data_type='float32',
             param_attr={'name': 'shared_w'},
-            program=program)
+            main_program=program)
         embed_forth = layers.embedding(
             input=forth_word,
             size=[dict_size, embed_size],
             data_type='float32',
             param_attr={'name': 'shared_w'},
-            program=program)
+            main_program=program)
         concat_embed = layers.concat(
             input=[embed_first, embed_second, embed_third, embed_forth],
             axis=1,
-            program=program)
+            main_program=program)
         hidden1 = layers.fc(input=concat_embed,
                             size=256,
                             act='sigmoid',
-                            program=program)
+                            main_program=program)
         predict_word = layers.fc(input=hidden1,
                                  size=dict_size,
                                  act='softmax',
-                                 program=program)
-        cost = layers.cross_entropy(
-            input=predict_word, label=next_word, program=program)
-        avg_cost = layers.mean(x=cost, program=program)
+                                 main_program=program)
+        cost = layers.cross_entropy(
+            input=predict_word, label=next_word, main_program=program)
+        avg_cost = layers.mean(x=cost, main_program=program)
         self.assertIsNotNone(avg_cost)
         print str(program)
python/paddle/v2/framework/tests/test_lod_rank_table.py
 from paddle.v2.framework.layers import lod_rank_table, data
 from paddle.v2.framework.executor import Executor
-from paddle.v2.framework.framework import g_program
+from paddle.v2.framework.framework import g_main_program
 import paddle.v2.framework.core as core
 import numpy
 import unittest
@@ -19,7 +19,7 @@ class TestLoDRankTable(unittest.TestCase):
         tensor.set(numpy.random.random(size=(17, 100)), cpu)
         tensor.set_lod([[0, 1, 3], [0, 5, 6, 7], [0, 3, 4, 9, 10, 13, 16, 17]])
-        exe.run(g_program, scope=scope, feed={'x': tensor})
+        exe.run(g_main_program, scope=scope, feed={'x': tensor})
         var = scope.find_var(rank_table.name)
         table = var.get_lod_rank_table()
         self.assertEqual([(0, 5), (1, 1), (2, 1)], table.items())
python/paddle/v2/framework/tests/test_operator_desc.py
 import unittest
-from paddle.v2.framework.framework import Variable, Program, g_program
+from paddle.v2.framework.framework import Variable, Program, g_main_program
 import paddle.v2.framework.core as core
 class TestOperator(unittest.TestCase):
     def test_error_type(self):
-        block = g_program.create_block()
+        block = g_main_program.create_block()
         try:
             block.append_op()
             self.assertFail()
python/paddle/v2/framework/tests/test_parameter.py
 import unittest
-from paddle.v2.framework.framework import g_program
+from paddle.v2.framework.framework import g_main_program
 import paddle.v2.framework.core as core
 class TestParameter(unittest.TestCase):
     def test_param(self):
-        b = g_program.create_block()
+        b = g_main_program.create_block()
         param = b.create_parameter(
             name='fc.w',
             shape=[784, 100],
python/paddle/v2/framework/tests/test_program.py
@@ -2,35 +2,35 @@ import unittest
 import paddle.v2.framework.core as core
 from paddle.v2.framework.framework import Program
-from paddle.v2.framework.framework import g_program
+from paddle.v2.framework.framework import g_main_program
 class TestProgram(unittest.TestCase):
     def test_program(self):
-        b = g_program.current_block()
+        b = g_main_program.current_block()
         self.assertEqual(-1, b.parent_idx)
         self.assertEqual(0, b.idx)
-        b = g_program.create_block()
+        b = g_main_program.create_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)
-        b = g_program.create_block()
+        b = g_main_program.create_block()
         self.assertEqual(2, b.idx)
         self.assertEqual(1, b.parent_idx)
-        g_program.rollback()
-        b = g_program.current_block()
+        g_main_program.rollback()
+        b = g_main_program.current_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)
-        b = g_program.create_block()
+        b = g_main_program.create_block()
         self.assertEqual(3, b.idx)
         self.assertEqual(1, b.parent_idx)
-        g_program.rollback()
-        b = g_program.current_block()
+        g_main_program.rollback()
+        b = g_main_program.current_block()
         self.assertEqual(1, b.idx)
         self.assertEqual(0, b.parent_idx)
python/paddle/v2/framework/tests/test_recognize_digits_conv.py
@@ -4,26 +4,26 @@ import paddle.v2.framework.nets as nets
 import paddle.v2.framework.core as core
 import paddle.v2.framework.optimizer as optimizer
-from paddle.v2.framework.framework import Program, g_program
+from paddle.v2.framework.framework import Program, g_main_program
 from paddle.v2.framework.executor import Executor
 import numpy as np
-init_program = Program()
-program = Program()
+startup_program = Program()
+main_program = Program()
 images = layers.data(
     name='pixel',
     shape=[1, 28, 28],
     data_type='float32',
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 label = layers.data(
     name='label',
     shape=[1],
     data_type='int64',
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 conv_pool_1 = nets.simple_img_conv_pool(
     input=images,
     filter_size=5,
@@ -31,8 +31,8 @@ conv_pool_1 = nets.simple_img_conv_pool(
     pool_size=2,
     pool_stride=2,
     act="relu",
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 conv_pool_2 = nets.simple_img_conv_pool(
     input=conv_pool_1,
     filter_size=5,
@@ -40,24 +40,30 @@ conv_pool_2 = nets.simple_img_conv_pool(
     pool_size=2,
     pool_stride=2,
     act="relu",
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 predict = layers.fc(input=conv_pool_2,
                     size=10,
                     act="softmax",
-                    program=program,
-                    init_program=init_program)
+                    main_program=main_program,
+                    startup_program=startup_program)
 cost = layers.cross_entropy(
-    input=predict, label=label, program=program, init_program=init_program)
-avg_cost = layers.mean(x=cost, program=program)
+    input=predict,
+    label=label,
+    main_program=main_program,
+    startup_program=startup_program)
+avg_cost = layers.mean(x=cost, main_program=main_program)
 accuracy = layers.accuracy(
-    input=predict, label=label, program=program, init_program=init_program)
+    input=predict,
+    label=label,
+    main_program=main_program,
+    startup_program=startup_program)
 # optimizer = optimizer.MomentumOptimizer(learning_rate=0.1 / 128.0,
 #                                         momentum=0.9)
 optimizer = optimizer.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
-opts = optimizer.minimize(avg_cost, init_program)
+opts = optimizer.minimize(avg_cost, startup_program)
 BATCH_SIZE = 50
 PASS_NUM = 3
@@ -69,7 +75,7 @@ train_reader = paddle.batch(
 place = core.CPUPlace()
 exe = Executor(place)
-exe.run(init_program, feed={}, fetch_list=[])
+exe.run(startup_program, feed={}, fetch_list=[])
 for pass_id in range(PASS_NUM):
     count = 0
@@ -84,7 +90,7 @@ for pass_id in range(PASS_NUM):
         tensor_img.set(img_data, place)
         tensor_y.set(y_data, place)
-        outs = exe.run(program,
+        outs = exe.run(main_program,
                        feed={"pixel": tensor_img, "label": tensor_y},
                        fetch_list=[avg_cost, accuracy])
python/paddle/v2/framework/tests/test_recognize_digits_mlp.py
@@ -11,14 +11,14 @@ from paddle.v2.framework.initializer import UniformInitializer
 import numpy as np
 BATCH_SIZE = 128
-init_program = Program()
-program = Program()
+startup_program = Program()
+main_program = Program()
 image = layers.data(
     name='x',
     shape=[784],
     data_type='float32',
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 param_attr = {
     'name': None,
@@ -30,38 +30,45 @@ param_attr = {
 hidden1 = layers.fc(input=image,
                     size=128,
                     act='relu',
-                    program=program,
-                    init_program=init_program,
+                    main_program=main_program,
+                    startup_program=startup_program,
                     param_attr=param_attr)
 hidden2 = layers.fc(input=hidden1,
                     size=64,
                     act='relu',
-                    program=program,
-                    init_program=init_program,
+                    main_program=main_program,
+                    startup_program=startup_program,
                     param_attr=param_attr)
 predict = layers.fc(input=hidden2,
                     size=10,
                     act='softmax',
-                    program=program,
-                    init_program=init_program,
+                    main_program=main_program,
+                    startup_program=startup_program,
                     param_attr=param_attr)
 label = layers.data(
     name='y',
     shape=[1],
     data_type='int64',
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 cost = layers.cross_entropy(
-    input=predict, label=label, program=program, init_program=init_program)
-avg_cost = layers.mean(x=cost, program=program, init_program=init_program)
+    input=predict,
+    label=label,
+    main_program=main_program,
+    startup_program=startup_program)
+avg_cost = layers.mean(
+    x=cost, main_program=main_program, startup_program=startup_program)
 accuracy = layers.accuracy(
-    input=predict, label=label, program=program, init_program=init_program)
+    input=predict,
+    label=label,
+    main_program=main_program,
+    startup_program=startup_program)
 optimizer = optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
-opts = optimizer.minimize(avg_cost, init_program)
+opts = optimizer.minimize(avg_cost, startup_program)
 train_reader = paddle.batch(
     paddle.reader.shuffle(
@@ -71,7 +78,7 @@ train_reader = paddle.batch(
 place = core.CPUPlace()
 exe = Executor(place)
-exe.run(init_program, feed={}, fetch_list=[])
+exe.run(startup_program, feed={}, fetch_list=[])
 PASS_NUM = 100
 for pass_id in range(PASS_NUM):
@@ -86,7 +93,7 @@ for pass_id in range(PASS_NUM):
         tensor_y = core.LoDTensor()
         tensor_y.set(y_data, place)
-        outs = exe.run(program,
+        outs = exe.run(main_program,
                        feed={'x': tensor_x, 'y': tensor_y},
                        fetch_list=[avg_cost, accuracy])
python/paddle/v2/framework/tests/test_recommender_system.py
浏览文件 @
51d4afaa
...
@@ -4,13 +4,13 @@ import paddle.v2.framework.nets as nets
 import paddle.v2.framework.core as core
 import paddle.v2.framework.optimizer as optimizer

-from paddle.v2.framework.framework import Program, g_program
+from paddle.v2.framework.framework import Program, g_main_program
 from paddle.v2.framework.executor import Executor

 import numpy as np

-init_program = Program()
-program = Program()
+startup_program = Program()
+main_program = Program()
 is_sparse = True
 use_gpu = False
 BATCH_SIZE = 256
...
@@ -26,8 +26,8 @@ def get_usr_combined_features():
         name='user_id',
         shape=[1],
         data_type='int64',
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     usr_emb = layers.embedding(
         input=uid,
...
@@ -35,13 +35,13 @@ def get_usr_combined_features():
         size=[USR_DICT_SIZE, 32],
         param_attr={'name': 'user_table'},
         is_sparse=is_sparse,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     usr_fc = layers.fc(input=usr_emb,
                        size=32,
-                       program=program,
-                       init_program=init_program)
+                       main_program=main_program,
+                       startup_program=startup_program)

     USR_GENDER_DICT_SIZE = 2
...
@@ -49,75 +49,75 @@ def get_usr_combined_features():
         name='gender_id',
         shape=[1],
         data_type='int64',
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     usr_gender_emb = layers.embedding(
         input=usr_gender_id,
         size=[USR_GENDER_DICT_SIZE, 16],
         param_attr={'name': 'gender_table'},
         is_sparse=is_sparse,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     usr_gender_fc = layers.fc(input=usr_gender_emb,
                               size=16,
-                              program=program,
-                              init_program=init_program)
+                              main_program=main_program,
+                              startup_program=startup_program)

     USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
     usr_age_id = layers.data(
         name='age_id',
         shape=[1],
         data_type="int64",
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     usr_age_emb = layers.embedding(
         input=usr_age_id,
         size=[USR_AGE_DICT_SIZE, 16],
         is_sparse=is_sparse,
         param_attr={'name': 'age_table'},
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     usr_age_fc = layers.fc(input=usr_age_emb,
                            size=16,
-                           program=program,
-                           init_program=init_program)
+                           main_program=main_program,
+                           startup_program=startup_program)

     USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
     usr_job_id = layers.data(
         name='job_id',
         shape=[1],
         data_type="int64",
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     usr_job_emb = layers.embedding(
         input=usr_job_id,
         size=[USR_JOB_DICT_SIZE, 16],
         param_attr={'name': 'job_table'},
         is_sparse=is_sparse,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     usr_job_fc = layers.fc(input=usr_job_emb,
                            size=16,
-                           program=program,
-                           init_program=init_program)
+                           main_program=main_program,
+                           startup_program=startup_program)

     concat_embed = layers.concat(
         input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc],
         axis=1,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     usr_combined_features = layers.fc(input=concat_embed,
                                       size=200,
                                       act="tanh",
-                                      program=program,
-                                      init_program=init_program)
+                                      main_program=main_program,
+                                      startup_program=startup_program)

     return usr_combined_features
...
@@ -130,8 +130,8 @@ def get_mov_combined_features():
         name='movie_id',
         shape=[1],
         data_type='int64',
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     mov_emb = layers.embedding(
         input=mov_id,
...
@@ -139,13 +139,13 @@ def get_mov_combined_features():
         size=[MOV_DICT_SIZE, 32],
         param_attr={'name': 'movie_table'},
         is_sparse=is_sparse,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     mov_fc = layers.fc(input=mov_emb,
                        size=32,
-                       program=program,
-                       init_program=init_program)
+                       main_program=main_program,
+                       startup_program=startup_program)

     CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
...
@@ -153,21 +153,21 @@ def get_mov_combined_features():
         name='category_id',
         shape=[1],
         data_type='int64',
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     mov_categories_emb = layers.embedding(
         input=category_id,
         size=[CATEGORY_DICT_SIZE, 32],
         is_sparse=is_sparse,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     mov_categories_hidden = layers.sequence_pool(
         input=mov_categories_emb,
         pool_type="sum",
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
...
@@ -175,15 +175,15 @@ def get_mov_combined_features():
         name='movie_title',
         shape=[1],
         data_type='int64',
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     mov_title_emb = layers.embedding(
         input=mov_title_id,
         size=[MOV_TITLE_DICT_SIZE, 32],
         is_sparse=is_sparse,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     mov_title_conv = nets.sequence_conv_pool(
         input=mov_title_emb,
...
@@ -191,21 +191,21 @@ def get_mov_combined_features():
         filter_size=3,
         act="tanh",
         pool_type="sum",
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     concat_embed = layers.concat(
         input=[mov_fc, mov_categories_hidden, mov_title_conv],
         axis=1,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     # FIXME(dzh) : need tanh operator
     mov_combined_features = layers.fc(input=concat_embed,
                                       size=200,
                                       act="tanh",
-                                      program=program,
-                                      init_program=init_program)
+                                      main_program=main_program,
+                                      startup_program=startup_program)

     return mov_combined_features
...
@@ -218,24 +218,26 @@ def model():
     inference = layers.cos_sim(
         X=usr_combined_features,
         Y=mov_combined_features,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     label = layers.data(
         name='score',
         shape=[1],
         data_type='float32',
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

     square_cost = layers.square_error_cost(
         input=inference,
         label=label,
-        program=program,
-        init_program=init_program)
+        main_program=main_program,
+        startup_program=startup_program)

-    avg_cost = layers.mean(x=square_cost, program=program, init_program=init_program)
+    avg_cost = layers.mean(
+        x=square_cost, main_program=main_program, startup_program=startup_program)

     return avg_cost
...
@@ -243,8 +245,8 @@ def model():
 def main():
     cost = model()
     sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.2)
-    opts = sgd_optimizer.minimize(cost, init_program=init_program)
-    block = program.block(0)
+    opts = sgd_optimizer.minimize(cost, startup_program=startup_program)
+    block = main_program.block(0)

     if use_gpu:
         place = core.GPUPlace(0)
...
@@ -252,7 +254,7 @@ def main():
         place = core.CPUPlace()

     exe = Executor(place)
-    exe.run(init_program, feed={}, fetch_list=[])
+    exe.run(startup_program, feed={}, fetch_list=[])

     train_reader = paddle.batch(
         paddle.reader.shuffle(
...
@@ -301,7 +303,7 @@ def main():
     PASS_NUM = 100
     for pass_id in range(PASS_NUM):
         for data in train_reader():
-            outs = exe.run(program,
+            outs = exe.run(main_program,
                            feed=func_feed(feeding, data),
                            fetch_list=[cost])
             out = np.array(outs[0])
...
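The recommender test threads the same two keyword arguments through data, embedding, fc, sequence_pool, concat, cos_sim and square_error_cost. A condensed sketch of the user-id branch under the new names follows; USR_DICT_SIZE is a hypothetical placeholder here, since the real test derives its dictionary sizes from the MovieLens dataset helpers.

import paddle.v2.framework.layers as layers
from paddle.v2.framework.framework import Program

startup_program = Program()
main_program = Program()
is_sparse = True
USR_DICT_SIZE = 6040  # hypothetical placeholder; the test computes this from the dataset

uid = layers.data(name='user_id', shape=[1], data_type='int64',
                  main_program=main_program, startup_program=startup_program)
usr_emb = layers.embedding(input=uid,
                           size=[USR_DICT_SIZE, 32],
                           param_attr={'name': 'user_table'},
                           is_sparse=is_sparse,
                           main_program=main_program,
                           startup_program=startup_program)
usr_fc = layers.fc(input=usr_emb, size=32,
                   main_program=main_program, startup_program=startup_program)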
python/paddle/v2/framework/tests/test_recurrent_op.py View file @ 51d4afaa
...
@@ -99,17 +99,17 @@ class RecurrentOpTest1(unittest.TestCase):
     batch_size = 1
     sent_len = 1

-    def init_program(self):
-        self.program = Program()
-        self.init_program = Program()
+    def setup_program(self):
+        self.main_program = Program()
+        self.startup_program = Program()
         self.p_info = {
-            "program": self.program,
-            "init_program": self.init_program
+            "main_program": self.main_program,
+            "startup_program": self.startup_program
         }
         self.place = core.CPUPlace()

     def setUp(self):
-        self.init_program()
+        self.setup_program()

         self.data_field = {"x", "h_boot"}
         self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
...
@@ -131,7 +131,7 @@ class RecurrentOpTest1(unittest.TestCase):
             name='h_boot',
             **self.p_info)

-        rnn = StaticRNN(program=self.program)
+        rnn = StaticRNN(main_program=self.main_program)
         with rnn.step():
             h_pre = rnn.memory(init=h_boot)
             x_t = rnn.step_input(x)
...
@@ -153,7 +153,7 @@ class RecurrentOpTest1(unittest.TestCase):
             for x in self.data_field
         }
         exe = Executor(self.place)
-        out = exe.run(self.program,
+        out = exe.run(self.main_program,
                       feed=self.feed_map,
                       fetch_list=[self.output])
...
@@ -165,12 +165,14 @@ class RecurrentOpTest1(unittest.TestCase):
             for x in self.data_field
         }
         fetch_list = [
-            self.program.global_block().var(x + "@GRAD")
+            self.main_program.global_block().var(x + "@GRAD")
             for x in self.data_field
         ]

         exe = Executor(self.place)
-        return exe.run(self.program, feed=self.feed_map, fetch_list=fetch_list)
+        return exe.run(self.main_program,
+                       feed=self.feed_map,
+                       fetch_list=fetch_list)

     def test_backward(self):
         self.check_forward()
...
@@ -237,7 +239,7 @@ class RecurrentOpTest2(RecurrentOpTest1):
     sent_len = 2

     def setUp(self):
-        self.init_program()
+        self.setup_program()

         self.data_field = {"x", "h_boot", "W", "U"}
...
@@ -260,7 +262,7 @@ class RecurrentOpTest2(RecurrentOpTest1):
             name='h_boot',
             **self.p_info)

-        rnn = StaticRNN(program=self.program)
+        rnn = StaticRNN(main_program=self.main_program)
         with rnn.step():
             h_pre = rnn.memory(init=h_boot)
             x_t = rnn.step_input(x)
...
@@ -333,7 +335,7 @@ class RecurrentOpTest3(RecurrentOpTest1):
     sent_len = 2

     def setUp(self):
-        self.init_program()
+        self.setup_program()

         self.data_field = {"x", "h_boot1", "h_boot2"}
...
@@ -364,7 +366,7 @@ class RecurrentOpTest3(RecurrentOpTest1):
             append_batch_size=False,
             **self.p_info)

-        rnn = StaticRNN(program=self.program)
+        rnn = StaticRNN(main_program=self.main_program)
         with rnn.step():
             h_pre1 = rnn.memory(init=h_boot1)
             h_pre2 = rnn.memory(init=h_boot2)
...
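The RNN tests bundle the two programs into a kwargs dict and splat it into every layer call, and StaticRNN itself is now constructed with main_program. The sketch below shows that wiring in isolation; the tensor shapes are illustrative only, StaticRNN is assumed to be importable from the layers module as in this codebase, and the step body plus output wiring are elided.

import paddle.v2.framework.layers as layers
from paddle.v2.framework.framework import Program

main_program = Program()
startup_program = Program()
p_info = {"main_program": main_program, "startup_program": startup_program}

# Shapes below are illustrative; the tests build them from sent_len/batch_size/input_dim.
x = layers.data(shape=[1, 1, 2], data_type='float32', name='x',
                append_batch_size=False, **p_info)
h_boot = layers.data(shape=[1, 2], data_type='float32', name='h_boot', **p_info)

rnn = layers.StaticRNN(main_program=main_program)  # assumed import location
with rnn.step():
    h_pre = rnn.memory(init=h_boot)
    x_t = rnn.step_input(x)
    # ...step computation and output wiring elided, as in the tests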
python/paddle/v2/framework/tests/test_understand_sentiment_conv.py View file @ 51d4afaa
...
@@ -4,7 +4,7 @@ import paddle.v2.framework.nets as nets
 import paddle.v2.framework.core as core
 import paddle.v2.framework.optimizer as optimizer

-from paddle.v2.framework.framework import Program, g_program, g_init_program
+from paddle.v2.framework.framework import Program, g_main_program, g_startup_program
 from paddle.v2.framework.executor import Executor

 import numpy as np
...
@@ -70,7 +70,7 @@ def main():
     place = core.CPUPlace()
     exe = Executor(place)

-    exe.run(g_init_program)
+    exe.run(g_startup_program)

     for pass_id in xrange(PASS_NUM):
         for data in train_data():
...
@@ -82,7 +82,7 @@ def main():
             tensor_label = core.LoDTensor()
             tensor_label.set(label, place)

-            outs = exe.run(g_program,
+            outs = exe.run(g_main_program,
                            feed={"words": tensor_words,
                                  "label": tensor_label},
                            fetch_list=[cost, acc])
...
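This test builds its network on the default global programs rather than explicit Program objects, so after the rename the only executor-facing change is which globals are imported and run. A minimal sketch, assuming cost and acc have been built on g_main_program as in the test:

import paddle.v2.framework.core as core
from paddle.v2.framework.framework import g_main_program, g_startup_program
from paddle.v2.framework.executor import Executor

exe = Executor(core.CPUPlace())
exe.run(g_startup_program)  # run parameter initialization once
# per batch, with cost/acc built on g_main_program beforehand:
# outs = exe.run(g_main_program,
#                feed={"words": tensor_words, "label": tensor_label},
#                fetch_list=[cost, acc])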
python/paddle/v2/framework/tests/test_variable.py View file @ 51d4afaa
 import unittest
-from paddle.v2.framework.framework import Variable, g_program, Program
+from paddle.v2.framework.framework import Variable, g_main_program, Program
 import paddle.v2.framework.core as core
 import numpy as np
...
@@ -18,7 +18,7 @@ class TestVariable(unittest.TestCase):
         self.assertRaises(ValueError, lambda: convert("int8"))

     def test_var(self):
-        b = g_program.current_block()
+        b = g_main_program.current_block()
         w = b.create_var(
             dtype="float64", shape=[784, 100], lod_level=0, name="fc.w")
         self.assertNotEqual(str(w), "")
...
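For the variable test the rename only touches the default program handle; a minimal sketch of the renamed call chain, taken directly from the hunk above:

from paddle.v2.framework.framework import g_main_program

b = g_main_program.current_block()
w = b.create_var(dtype="float64", shape=[784, 100], lod_level=0, name="fc.w")
assert str(w) != ""  # the test only checks that the variable's description is non-empty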
python/paddle/v2/framework/tests/test_word2vec.py View file @ 51d4afaa
...
@@ -3,13 +3,13 @@ import paddle.v2.framework.layers as layers
 import paddle.v2.framework.core as core
 import paddle.v2.framework.optimizer as optimizer

-from paddle.v2.framework.framework import Program, g_program
+from paddle.v2.framework.framework import Program, g_main_program
 from paddle.v2.framework.executor import Executor

 import numpy as np

-init_program = Program()
-program = Program()
+startup_program = Program()
+main_program = Program()
 embed_size = 32
 hidden_size = 256
...
@@ -24,32 +24,32 @@ first_word = layers.data(
     name='firstw',
     shape=[1],
     data_type='int64',
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 second_word = layers.data(
     name='secondw',
     shape=[1],
     data_type='int64',
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 third_word = layers.data(
     name='thirdw',
     shape=[1],
     data_type='int64',
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 forth_word = layers.data(
     name='forthw',
     shape=[1],
     data_type='int64',
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 next_word = layers.data(
     name='nextw',
     shape=[1],
     data_type='int64',
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)

 embed_first = layers.embedding(
     input=first_word,
...
@@ -57,16 +57,16 @@ embed_first = layers.embedding(
     data_type='float32',
     is_sparse=is_sparse,
     param_attr={'name': 'shared_w'},
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 embed_second = layers.embedding(
     input=second_word,
     size=[dict_size, embed_size],
     data_type='float32',
     is_sparse=is_sparse,
     param_attr={'name': 'shared_w'},
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)

 embed_third = layers.embedding(
     input=third_word,
...
@@ -74,42 +74,43 @@ embed_third = layers.embedding(
     data_type='float32',
     is_sparse=is_sparse,
     param_attr={'name': 'shared_w'},
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 embed_forth = layers.embedding(
     input=forth_word,
     size=[dict_size, embed_size],
     data_type='float32',
     is_sparse=is_sparse,
     param_attr={'name': 'shared_w'},
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)

 concat_embed = layers.concat(
     input=[embed_first, embed_second, embed_third, embed_forth],
     axis=1,
-    program=program,
-    init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
 hidden1 = layers.fc(input=concat_embed,
                     size=hidden_size,
                     act='sigmoid',
-                    program=program,
-                    init_program=init_program)
+                    main_program=main_program,
+                    startup_program=startup_program)
 predict_word = layers.fc(input=hidden1,
                          size=dict_size,
                          act='softmax',
-                         program=program,
-                         init_program=init_program)
+                         main_program=main_program,
+                         startup_program=startup_program)
 cost = layers.cross_entropy(
     input=predict_word,
     label=next_word,
-    program=program,
-    init_program=init_program)
-avg_cost = layers.mean(x=cost, program=program, init_program=init_program)
+    main_program=main_program,
+    startup_program=startup_program)
+avg_cost = layers.mean(
+    x=cost, main_program=main_program, startup_program=startup_program)
 sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
-opts = sgd_optimizer.minimize(avg_cost, init_program)
+opts = sgd_optimizer.minimize(avg_cost, startup_program)

 train_reader = paddle.batch(
     paddle.dataset.imikolov.train(word_dict, N), batch_size)
...
@@ -117,7 +118,7 @@ train_reader = paddle.batch(
 place = core.CPUPlace()
 exe = Executor(place)

-exe.run(init_program, feed={}, fetch_list=[])
+exe.run(startup_program, feed={}, fetch_list=[])
 PASS_NUM = 100
 for pass_id in range(PASS_NUM):
     for data in train_reader():
...
@@ -145,7 +146,7 @@ for pass_id in range(PASS_NUM):
         next_tensor = core.LoDTensor()
         next_tensor.set(next_data, place)

-        outs = exe.run(program,
+        outs = exe.run(main_program,
                        feed={
                            'firstw': first_tensor,
                            'secondw': second_tensor,
...
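In the word2vec test the rename also covers the shared embedding table: each context word's embedding names the same parameter via param_attr={'name': 'shared_w'}, which, as far as this test shows, is how one lookup table is reused across the context words. A two-word sketch under the new keyword names; dict_size and embed_size are illustrative values here, since the test derives dict_size from the imikolov dictionary.

import paddle.v2.framework.layers as layers
from paddle.v2.framework.framework import Program

main_program = Program()
startup_program = Program()
is_sparse = True
dict_size, embed_size = 2073, 32  # illustrative values only

first_word = layers.data(name='firstw', shape=[1], data_type='int64',
                         main_program=main_program, startup_program=startup_program)
second_word = layers.data(name='secondw', shape=[1], data_type='int64',
                          main_program=main_program, startup_program=startup_program)

# Both embeddings name the same parameter, so they read the same 'shared_w' table.
embed_first = layers.embedding(input=first_word, size=[dict_size, embed_size],
                               data_type='float32', is_sparse=is_sparse,
                               param_attr={'name': 'shared_w'},
                               main_program=main_program,
                               startup_program=startup_program)
embed_second = layers.embedding(input=second_word, size=[dict_size, embed_size],
                                data_type='float32', is_sparse=is_sparse,
                                param_attr={'name': 'shared_w'},
                                main_program=main_program,
                                startup_program=startup_program)
concat_embed = layers.concat(input=[embed_first, embed_second], axis=1,
                             main_program=main_program, startup_program=startup_program)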