Commit 6b1a91f9
Authored Feb 28, 2017 by qiaolongfei

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into rnn

Parents: 876d5974, c444708a

Showing 13 changed files with 452 additions and 45 deletions (+452 −45).
demo/mnist/api_train_v2.py                 +8   −11
python/paddle/v2/__init__.py               +2   −1
python/paddle/v2/data_feeder.py            +1   −1
python/paddle/v2/data_type.py              +3   −3
python/paddle/v2/dataset/cifar.py          +82  −0
python/paddle/v2/dataset/config.py         +29  −1
python/paddle/v2/dataset/movielens.py      +120 −0
python/paddle/v2/layer.py                  +1   −0
python/paddle/v2/parameters.py             +8   −12
python/paddle/v2/tests/CMakeLists.txt      +8   −2
python/paddle/v2/tests/test_topology.py    +83  −0
python/paddle/v2/topology.py               +95  −0
python/paddle/v2/trainer.py                +12  −14
demo/mnist/api_train_v2.py

```diff
@@ -39,17 +39,14 @@ def main():
     trainer = paddle.trainer.SGD(update_equation=adam_optimizer)
 
-    trainer.train(train_data_reader=train_reader,
-                  topology=cost,
-                  parameters=parameters,
-                  event_handler=event_handler,
-                  batch_size=32,  # batch size should be refactor in Data reader
-                  data_types=[  # data_types will be removed, It should be in
-                      # network topology
-                      ('pixel', images.type),
-                      ('label', label.type)],
-                  reader_dict={'pixel': 0,
-                               'label': 1})
+    trainer.train(
+        train_data_reader=train_reader,
+        cost=cost,
+        parameters=parameters,
+        event_handler=event_handler,
+        batch_size=32,  # batch size should be refactor in Data reader
+        reader_dict={images.name: 0,
+                     label.name: 1})
 
 if __name__ == '__main__':
```
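The substance of this hunk: `data_types` disappears from the call site because the trainer now infers input types from the cost topology, and the `reader_dict` keys come from the data layers' `name` attributes instead of hard-coded strings. Below is a minimal sketch of the reader contract this relies on; the zero vector is illustrative only and stands in for a real MNIST image.

```python
import numpy


def train_reader():
    # Each sample is a tuple; reader_dict maps data layer names to slots.
    # Slot 0 feeds the 'pixel' layer, slot 1 feeds the 'label' layer.
    yield numpy.zeros(784, dtype=numpy.float32), 7


reader_dict = {'pixel': 0, 'label': 1}
```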
python/paddle/v2/__init__.py

```diff
@@ -18,6 +18,7 @@ import parameters
 import trainer
 import event
 import data_type
+import topology
 import data_feeder
 import attr
 import pooling
@@ -25,7 +26,7 @@ import py_paddle.swig_paddle as api
 __all__ = [
     'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
-    'event', 'data_type', 'attr', 'pooling', 'data_feeder'
+    'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'topology'
 ]
```
python/paddle/v2/data_feeder.py

```diff
@@ -23,7 +23,7 @@ class DataFeeder(DataProviderConverter):
     """
     DataFeeder converts the data returned by paddle.reader into a data structure
     of Arguments which is defined in the API. The paddle.reader usually returns
-    a list of mini-batch data entries. Each data entry in the list is one sampe.
+    a list of mini-batch data entries. Each data entry in the list is one sample.
     Each sample is a list or a tuple with one feature or multiple features.
     DataFeeder converts this mini-batch data entries into Arguments in order
     to feed it to C++ interface.
```
python/paddle/v2/data_type.py

```diff
@@ -13,10 +13,10 @@
 # limitations under the License.
 from paddle.trainer.PyDataProvider2 import \
-    InputType, dense_vector, sparse_binary_vector, \
+    InputType, DataType, dense_vector, sparse_binary_vector, \
     sparse_vector, integer_value, integer_value_sequence
 
 __all__ = [
-    'InputType', 'dense_vector', 'sparse_binary_vector', 'sparse_vector',
-    'integer_value', 'integer_value_sequence'
+    'InputType', 'DataType', 'dense_vector', 'sparse_binary_vector',
+    'sparse_vector', 'integer_value', 'integer_value_sequence'
 ]
```
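Exporting `DataType` lets user code inspect the proto-level type enum carried by an `InputType`; the new `test_topology.py` later in this commit does exactly that. A minimal sketch:

```python
import paddle.v2.data_type as data_type

# dense_vector(784) carries both the proto type enum and the dimension.
dv = data_type.dense_vector(784)
assert dv.type == data_type.DataType.Dense
assert dv.dim == 784
```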
python/paddle/v2/dataset/cifar.py (new file, mode 100644)

```python
"""
CIFAR Dataset.

URL: https://www.cs.toronto.edu/~kriz/cifar.html

the default train_creator, test_creator used for CIFAR-10 dataset.
"""
import cPickle
import itertools
import tarfile

import numpy

from config import download

__all__ = [
    'cifar_100_train_creator', 'cifar_100_test_creator', 'train_creator',
    'test_creator'
]

CIFAR10_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
CIFAR10_MD5 = 'c58f30108f718f92721af3b95e74349a'
CIFAR100_URL = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
CIFAR100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85'


def __read_batch__(filename, sub_name):
    def reader():
        def __read_one_batch_impl__(batch):
            data = batch['data']
            labels = batch.get('labels', batch.get('fine_labels', None))
            assert labels is not None
            for sample, label in itertools.izip(data, labels):
                yield (sample / 255.0).astype(numpy.float32), int(label)

        with tarfile.open(filename, mode='r') as f:
            names = (each_item.name for each_item in f
                     if sub_name in each_item.name)

            for name in names:
                batch = cPickle.load(f.extractfile(name))
                for item in __read_one_batch_impl__(batch):
                    yield item

    return reader


def cifar_100_train_creator():
    fn = download(url=CIFAR100_URL, md5=CIFAR100_MD5)
    return __read_batch__(fn, 'train')


def cifar_100_test_creator():
    fn = download(url=CIFAR100_URL, md5=CIFAR100_MD5)
    return __read_batch__(fn, 'test')


def train_creator():
    """
    Default train reader creator. Use CIFAR-10 dataset.
    """
    fn = download(url=CIFAR10_URL, md5=CIFAR10_MD5)
    return __read_batch__(fn, 'data_batch')


def test_creator():
    """
    Default test reader creator. Use CIFAR-10 dataset.
    """
    fn = download(url=CIFAR10_URL, md5=CIFAR10_MD5)
    return __read_batch__(fn, 'test_batch')


def unittest():
    for _ in train_creator()():
        pass
    for _ in test_creator()():
        pass


if __name__ == '__main__':
    unittest()
```
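Note the creator-of-reader convention used throughout the new dataset package: `train_creator()` downloads (or reuses) the archive and returns a reader function, and calling that function yields samples. A minimal usage sketch, assuming the download succeeds:

```python
from paddle.v2.dataset.cifar import train_creator

reader = train_creator()  # fetches/caches cifar-10-python.tar.gz
for image, label in reader():
    # image: flattened numpy.float32 vector scaled into [0, 1]
    # label: int class id
    print image.shape, label
    break
```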
python/paddle/v2/dataset/config.py

```diff
+import hashlib
 import os
+import shutil
+import urllib2
 
-__all__ = ['DATA_HOME']
+__all__ = ['DATA_HOME', 'download']
 
 DATA_HOME = os.path.expanduser('~/.cache/paddle_data_set')
 
 if not os.path.exists(DATA_HOME):
     os.makedirs(DATA_HOME)
+
+
+def download(url, md5):
+    filename = os.path.split(url)[-1]
+    assert DATA_HOME is not None
+    filepath = os.path.join(DATA_HOME, md5)
+    if not os.path.exists(filepath):
+        os.makedirs(filepath)
+    __full_file__ = os.path.join(filepath, filename)
+
+    def __file_ok__():
+        if not os.path.exists(__full_file__):
+            return False
+        md5_hash = hashlib.md5()
+        with open(__full_file__, 'rb') as f:
+            for chunk in iter(lambda: f.read(4096), b""):
+                md5_hash.update(chunk)
+
+        return md5_hash.hexdigest() == md5
+
+    while not __file_ok__():
+        response = urllib2.urlopen(url)
+        with open(__full_file__, mode='wb') as of:
+            shutil.copyfileobj(fsrc=response, fdst=of)
+    return __full_file__
```
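`download` caches each file under `~/.cache/paddle_data_set/<md5>/<filename>` and keeps re-downloading until the on-disk MD5 matches, so a second call just verifies the checksum and returns; note there is no retry limit, so a permanently corrupt source would loop forever. A minimal sketch, reusing the CIFAR-10 URL and checksum from `cifar.py` above:

```python
from paddle.v2.dataset.config import download

path = download(
    url='https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
    md5='c58f30108f718f92721af3b95e74349a')
# path == ~/.cache/paddle_data_set/c58f3010.../cifar-10-python.tar.gz
print path
```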
python/paddle/v2/dataset/movielens.py (new file, mode 100644)

```python
import zipfile
from config import download
import re
import random
import functools

__all__ = ['train_creator', 'test_creator']


class MovieInfo(object):
    def __init__(self, index, categories, title):
        self.index = int(index)
        self.categories = categories
        self.title = title

    def value(self):
        return [
            self.index, [CATEGORIES_DICT[c] for c in self.categories],
            [MOVIE_TITLE_DICT[w.lower()] for w in self.title.split()]
        ]


class UserInfo(object):
    def __init__(self, index, gender, age, job_id):
        self.index = int(index)
        self.is_male = gender == 'M'
        self.age = [1, 18, 25, 35, 45, 50, 56].index(int(age))
        self.job_id = int(job_id)

    def value(self):
        return [self.index, 0 if self.is_male else 1, self.age, self.job_id]


MOVIE_INFO = None
MOVIE_TITLE_DICT = None
CATEGORIES_DICT = None
USER_INFO = None


def __initialize_meta_info__():
    fn = download(
        url='http://files.grouplens.org/datasets/movielens/ml-1m.zip',
        md5='c4d9eecfca2ab87c1945afe126590906')
    global MOVIE_INFO
    if MOVIE_INFO is None:
        pattern = re.compile(r'^(.*)\((\d+)\)$')
        with zipfile.ZipFile(file=fn) as package:
            for info in package.infolist():
                assert isinstance(info, zipfile.ZipInfo)
            MOVIE_INFO = dict()
            title_word_set = set()
            categories_set = set()
            with package.open('ml-1m/movies.dat') as movie_file:
                for i, line in enumerate(movie_file):
                    movie_id, title, categories = line.strip().split('::')
                    categories = categories.split('|')
                    for c in categories:
                        categories_set.add(c)
                    title = pattern.match(title).group(1)
                    MOVIE_INFO[int(movie_id)] = MovieInfo(
                        index=movie_id, categories=categories, title=title)
                    for w in title.split():
                        title_word_set.add(w.lower())

            global MOVIE_TITLE_DICT
            MOVIE_TITLE_DICT = dict()
            for i, w in enumerate(title_word_set):
                MOVIE_TITLE_DICT[w] = i

            global CATEGORIES_DICT
            CATEGORIES_DICT = dict()
            for i, c in enumerate(categories_set):
                CATEGORIES_DICT[c] = i

            global USER_INFO
            USER_INFO = dict()
            with package.open('ml-1m/users.dat') as user_file:
                for line in user_file:
                    uid, gender, age, job, _ = line.strip().split("::")
                    USER_INFO[int(uid)] = UserInfo(
                        index=uid, gender=gender, age=age, job_id=job)
    return fn


def __reader__(rand_seed=0, test_ratio=0.1, is_test=False):
    fn = __initialize_meta_info__()
    rand = random.Random(x=rand_seed)
    with zipfile.ZipFile(file=fn) as package:
        with package.open('ml-1m/ratings.dat') as rating:
            for line in rating:
                if (rand.random() < test_ratio) == is_test:
                    uid, mov_id, rating, _ = line.strip().split("::")
                    uid = int(uid)
                    mov_id = int(mov_id)
                    rating = float(rating) * 2 - 5.0
                    mov = MOVIE_INFO[mov_id]
                    usr = USER_INFO[uid]
                    yield usr.value() + mov.value() + [[rating]]


def __reader_creator__(**kwargs):
    return lambda: __reader__(**kwargs)


train_creator = functools.partial(__reader_creator__, is_test=False)
test_creator = functools.partial(__reader_creator__, is_test=True)


def unittest():
    for train_count, _ in enumerate(train_creator()()):
        pass
    for test_count, _ in enumerate(test_creator()()):
        pass
    print train_count, test_count


if __name__ == '__main__':
    unittest()
```
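Each yielded sample is `usr.value() + mov.value() + [[rating]]`, i.e. `[user_id, gender_0_or_1, age_bucket, job_id, movie_id, [category ids], [title word ids], [rating]]`, with the 1-5 star rating rescaled by `rating * 2 - 5.0` into [-3.0, 5.0]. Because train and test readers share the same seeded `random.Random`, the 90/10 split is disjoint and reproducible. A minimal sketch, assuming the ml-1m archive is reachable:

```python
from paddle.v2.dataset.movielens import train_creator, test_creator

for sample in train_creator()():
    # [uid, gender, age_bucket, job_id, mov_id,
    #  [categories], [title words], [rating]]
    print sample
    break
```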
python/paddle/v2/layer.py

```diff
@@ -362,6 +362,7 @@ def mixed(size=0,
     return MixedLayerV2(size, input, name, act, bias_attr, layer_attr)
 
+LayerV2 = Layer
 data = DataLayerV2
 AggregateLevel = conf_helps.layers.AggregateLevel
 ExpandLevel = conf_helps.layers.ExpandLevel
```
python/paddle/v2/parameters.py

```diff
 import numpy as np
-from . import layer as v2_layer
 import py_paddle.swig_paddle as api
 from paddle.proto.ParameterConfig_pb2 import ParameterConfig
+from topology import Topology
 
 __all__ = ['Parameters', 'create']
 
 
-def create(*layers):
+def create(layers):
     """
-    Create parameter pool by layers. In paddle, layer can be represent a
-    model config.
+    Create parameter pool by topology.
 
     :param layers:
     :return:
     """
-    for layer in layers:
-        if not isinstance(layer, v2_layer.Layer):
-            raise ValueError(
-                'create must pass a topologies which type is paddle.layer.Layer')
-    model_config = v2_layer.parse_network(*layers)
+    topology = Topology(layers)
     pool = Parameters()
-    for param in model_config.parameters:
+    for param in topology.proto().parameters:
         pool.__append_config__(param)
     return pool
@@ -224,7 +219,8 @@ class Parameters(object):
             except ValueError:
                 # If no such parameter in gradient machine, then don't copy
                 pass
+
         self.__gradient_machines__.append(gradient_machine)
 
 def __get_parameter_in_gradient_machine__(gradient_machine, name):
```
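`create` now accepts a single layer or a list of layers and delegates both validation and `parse_network` to `Topology`; the varargs form `create(*layers)` is gone, so callers that unpacked several layers must now pass a list. A minimal sketch, assuming the small classifier used by `test_topology.py` later in this commit:

```python
import paddle.v2.layer as layer
import paddle.v2.data_type as data_type
import paddle.v2.parameters as parameters
import paddle.trainer_config_helpers as conf_helps

pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
label = layer.data(name='label', type=data_type.integer_value(10))
inference = layer.fc(input=pixel, size=10,
                     act=conf_helps.SoftmaxActivation())
cost = layer.classification_cost(input=inference, label=label)

params = parameters.create(cost)  # a single layer or a list both work
```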
python/paddle/v2/tests/CMakeLists.txt

```diff
-add_test(NAME test_v2_api
-        COMMAND bash ${PROJ_ROOT}/python/paddle/v2/tests/run_tests.sh ${PYTHON_EXECUTABLE})
-
 add_test(NAME test_v2_layer
         COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
         ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_layer.py
@@ -7,5 +10,8 @@ add_test(NAME test_v2_rnn_layer
         COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
         ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_rnn_layer.py)
+
+add_test(NAME test_v2_api
+        COMMAND bash ${PROJ_ROOT}/python/paddle/v2/tests/run_tests.sh ${PYTHON_EXECUTABLE})
+
+add_test(NAME test_topology
+        COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
+        ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_topology.py
+        WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle)
```
python/paddle/v2/tests/test_topology.py (new file, mode 100644)

```python
# Copyright PaddlePaddle contributors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.v2.layer as layer
import paddle.v2.topology as topology
import paddle.v2.data_type as data_type
import paddle.trainer_config_helpers as conf_helps


class TestTopology(unittest.TestCase):
    def test_data_type(self):
        pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
        label = layer.data(name='label', type=data_type.integer_value(10))
        hidden = layer.fc(input=pixel,
                          size=100,
                          act=conf_helps.SigmoidActivation())
        inference = layer.fc(input=hidden,
                             size=10,
                             act=conf_helps.SoftmaxActivation())
        cost = layer.classification_cost(input=inference, label=label)
        topo = topology.Topology(cost)
        data_types = topo.data_type()
        self.assertEqual(len(data_types), 2)
        pixel_data_type = filter(lambda type: type[0] == "pixel", data_types)
        self.assertEqual(len(pixel_data_type), 1)
        pixel_data_type = pixel_data_type[0]
        self.assertEqual(pixel_data_type[1].type, data_type.DataType.Dense)
        self.assertEqual(pixel_data_type[1].dim, 784)

        label_data_type = filter(lambda type: type[0] == "label", data_types)
        self.assertEqual(len(label_data_type), 1)
        label_data_type = label_data_type[0]
        self.assertEqual(label_data_type[1].type, data_type.DataType.Index)
        self.assertEqual(label_data_type[1].dim, 10)

    def test_get_layer(self):
        pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
        label = layer.data(name='label', type=data_type.integer_value(10))
        hidden = layer.fc(input=pixel,
                          size=100,
                          act=conf_helps.SigmoidActivation())
        inference = layer.fc(input=hidden,
                             size=10,
                             act=conf_helps.SoftmaxActivation())
        cost = layer.classification_cost(input=inference, label=label)
        topo = topology.Topology(cost)
        pixel_layer = topo.get_layer("pixel")
        label_layer = topo.get_layer("label")
        self.assertEqual(pixel_layer, pixel)
        self.assertEqual(label_layer, label)

    def test_parse(self):
        pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
        label = layer.data(name='label', type=data_type.integer_value(10))
        hidden = layer.fc(input=pixel,
                          size=100,
                          act=conf_helps.SigmoidActivation())
        inference = layer.fc(input=hidden,
                             size=10,
                             act=conf_helps.SoftmaxActivation())
        maxid = layer.max_id(input=inference)
        cost1 = layer.classification_cost(input=inference, label=label)
        cost2 = layer.cross_entropy_cost(input=inference, label=label)

        topology.Topology(cost2).proto()
        topology.Topology([cost1]).proto()
        topology.Topology([cost1, cost2]).proto()
        topology.Topology([inference, maxid]).proto()


if __name__ == '__main__':
    unittest.main()
```
python/paddle/v2/topology.py
0 → 100644
浏览文件 @
6b1a91f9
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
collections
from
paddle.proto.ModelConfig_pb2
import
ModelConfig
import
layer
as
v2_layer
__all__
=
[
'Topology'
]
class
Topology
(
object
):
"""
Topology is used to store the information about all layers
and network configs.
"""
def
__init__
(
self
,
layers
):
if
not
isinstance
(
layers
,
collections
.
Sequence
):
__check_layer_type__
(
layers
)
layers
=
[
layers
]
for
layer
in
layers
:
__check_layer_type__
(
layer
)
self
.
layers
=
layers
self
.
__model_config__
=
v2_layer
.
parse_network
(
*
layers
)
assert
isinstance
(
self
.
__model_config__
,
ModelConfig
)
def
proto
(
self
):
return
self
.
__model_config__
def
get_layer
(
self
,
name
):
"""
get v2.Layer Class instance by layer name
:param name:
:return:
"""
result_layer
=
[]
def
find_layer_by_name
(
layer
,
layer_name
):
if
len
(
result_layer
)
==
1
:
return
elif
layer
.
name
==
layer_name
:
result_layer
.
append
(
layer
)
else
:
for
parent_layer
in
layer
.
__parent_layers__
.
values
():
find_layer_by_name
(
parent_layer
,
layer_name
)
for
layer
in
self
.
layers
:
find_layer_by_name
(
layer
,
name
)
assert
len
(
result_layer
)
==
1
return
result_layer
[
0
]
def
data_layers
(
self
):
"""
get all data layer
:return:
"""
data_layers
=
set
()
def
find_data_layer
(
layer
):
if
isinstance
(
layer
,
v2_layer
.
DataLayerV2
):
data_layers
.
add
(
layer
)
for
parent_layer
in
layer
.
__parent_layers__
.
values
():
find_data_layer
(
parent_layer
)
for
layer
in
self
.
layers
:
find_data_layer
(
layer
)
return
data_layers
def
data_type
(
self
):
"""
get data_type from proto, such as:
[('image', dense_vector(768)), ('label', integer_value(10))]
"""
return
[(
data_layer
.
name
,
data_layer
.
type
)
for
data_layer
in
self
.
data_layers
()]
def
__check_layer_type__
(
layer
):
if
not
isinstance
(
layer
,
v2_layer
.
LayerV2
):
raise
ValueError
(
'layer should have type paddle.layer.Layer'
)
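`Topology` becomes the single owner of the parsed `ModelConfig`: `proto()` exposes it, `get_layer` and `data_layers` walk `__parent_layers__` back from the output layers, and `data_type()` is what the trainer below hands to `DataFeeder`. A minimal sketch mirroring the tests above:

```python
import paddle.v2.layer as layer
import paddle.v2.data_type as data_type
import paddle.v2.topology as topology
import paddle.trainer_config_helpers as conf_helps

pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
label = layer.data(name='label', type=data_type.integer_value(10))
inference = layer.fc(input=pixel, size=10,
                     act=conf_helps.SoftmaxActivation())
cost = layer.classification_cost(input=inference, label=label)

topo = topology.Topology(cost)
assert topo.get_layer('pixel') == pixel
# [('pixel', ...), ('label', ...)]; order not guaranteed, data_layers() is a set
print topo.data_type()
```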
python/paddle/v2/trainer.py

```diff
 import collections
 
 import py_paddle.swig_paddle as api
-from paddle.proto.ModelConfig_pb2 import ModelConfig
 
 from data_feeder import DataFeeder
+from topology import Topology
 from . import event as v2_event
-from . import layer as v2_layer
 from . import optimizer as v2_optimizer
 from . import parameters as v2_parameters
@@ -30,7 +29,7 @@ class ITrainer(object):
     def train(self,
               train_data_reader,
-              topology,
+              cost,
               parameters,
               test_data_reader=None,
               event_handler=None):
@@ -38,7 +37,7 @@ class ITrainer(object):
         train method.
 
         :param train_data_reader:
-        :param topology:
+        :param cost:
         :param parameters:
         :param test_data_reader:
         :param event_handler:
@@ -63,19 +62,18 @@ class SGD(ITrainer):
     def train(self,
               train_data_reader,
-              topology,
+              cost,
               parameters,
               num_passes=1,
               test_data_reader=None,
               event_handler=None,
               batch_size=32,
-              data_types=None,
               reader_dict=None):
         """
         Training method. Will train num_passes of input data.
 
         :param train_data_reader:
-        :param topology: Network Topology, use one or more Layers to represent it.
+        :param cost: cost layers, to be optimized.
         :param parameters: The parameter pools.
         :param num_passes: The total train passes.
         :param test_data_reader:
@@ -83,18 +81,18 @@ class SGD(ITrainer):
             occurred.
         :type event_handler: (BaseEvent) => None
         :param batch_size: Not important, will be removed after data refactor.
-        :param data_types: Not important, will be removed after data refactor.
         :return:
         """
         if event_handler is None:
             event_handler = default_event_handler
 
-        topology = v2_layer.parse_network(topology)
+        topology = Topology(cost)
 
         __check_train_args__(**locals())
 
         gm = api.GradientMachine.createFromConfigProto(
-            topology, api.CREATE_MODE_NORMAL, self.__optimizer__.enable_types())
+            topology.proto(), api.CREATE_MODE_NORMAL,
+            self.__optimizer__.enable_types())
         assert isinstance(gm, api.GradientMachine)
         parameters.append_gradient_machine(gm)
         gm.randParameters()
@@ -108,7 +106,7 @@ class SGD(ITrainer):
         assert isinstance(pass_evaluator, api.Evaluator)
         out_args = api.Arguments.createArguments(0)
-        feeder = DataFeeder(data_types, reader_dict)
+        feeder = DataFeeder(topology.data_type(), reader_dict)
         for pass_id in xrange(num_passes):
             event_handler(v2_event.BeginPass(pass_id))
@@ -154,7 +152,7 @@ def __data_reader_to_batch__(reader, batch_size, topology):
     def input_reorder(func):
         for item in func():
             retv = []
-            for __layer_name__ in topology.input_layer_names:
+            for __layer_name__ in topology.proto().input_layer_names:
                 retv.append(item[__layer_name__])
             yield retv
@@ -191,7 +189,7 @@ def __check_train_args__(train_data_reader, topology, parameters,
         raise ValueError('test_data_reader should be a function, which can '
                          'return a iterator')
 
-    if not isinstance(topology, ModelConfig):
+    if not isinstance(topology, Topology):
         raise ValueError('topology should be a model config')
 
     if not isinstance(parameters, v2_parameters.Parameters):
```