Commit 212f6eae
Authored Nov 15, 2017 by Luo Tao
modify the test config for test_CompareTwoNets.cpp
Parent: 3654e1e0
Showing 7 changed files with 142 additions and 346 deletions.
paddle/gserver/tests/CMakeLists.txt                     +9    -0
paddle/gserver/tests/sequence_recurrent.py              +56   -0
paddle/gserver/tests/sequence_recurrent_group.py        +70   -0
paddle/gserver/tests/test_CompareTwoNets.cpp            +7    -4
paddle/trainer/tests/CMakeLists.txt                     +0    -8
paddle/trainer/tests/sample_trainer_config_qb_rnn.conf  +0    -154
paddle/trainer/tests/sample_trainer_config_rnn.conf     +0    -180
paddle/gserver/tests/CMakeLists.txt
@@ -111,3 +111,12 @@ if(NOT ON_TRAVIS)
     ${CMAKE_CURRENT_BINARY_DIR}/test_CompareSparse
     WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
 endif()
+
+################ test_CompareTwoNets ######################
+add_unittest_without_exec(test_CompareTwoNets
+    test_CompareTwoNets.cpp)
+add_test(NAME test_CompareTwoNets
+    COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d
+        ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests
+        ${CMAKE_CURRENT_BINARY_DIR}/test_CompareTwoNets
+    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
paddle/gserver/tests/sequence_recurrent.py
new file mode 100644
#!/usr/bin/env python
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer_config_helpers import *

######################## data source ################################
dict_path = 'gserver/tests/Sequence/tour_dict_phrase.dict'
dict_file = dict()
for line_count, line in enumerate(open(dict_path, "r")):
    dict_file[line.strip()] = line_count

define_py_data_sources2(
    train_list='gserver/tests/Sequence/train.list',
    test_list=None,
    module='sequenceGen',
    obj='process',
    args={"dict_file": dict_file})

settings(batch_size=5)

######################## network configure ################################
dict_dim = len(open(dict_path, 'r').readlines())
word_dim = 128
hidden_dim = 128
label_dim = 3

# This config is designed to be equivalent with sequence_recurrent_group.py
data = data_layer(name="word", size=dict_dim)

emb = embedding_layer(
    input=data, size=word_dim, param_attr=ParamAttr(name="emb"))

recurrent = recurrent_layer(input=emb, bias_attr=False, act=SoftmaxActivation())

recurrent_last = last_seq(input=recurrent)

with mixed_layer(
        size=label_dim, act=SoftmaxActivation(), bias_attr=True) as output:
    output += full_matrix_projection(input=recurrent_last)

outputs(
    classification_cost(
        input=output, label=data_layer(name="label", size=1)))
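For readers less familiar with the old trainer-config helpers: the recurrent_layer above adds one weight matrix applied to the previous hidden state onto the (hidden-sized) input at each position, then applies the configured activation with no bias. A minimal numpy sketch of that recurrence, assuming the usual simple-RNN form h_t = act(x_t + W·h_{t-1}) with a zero initial state (shapes here are illustrative, not taken from the test data):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

def simple_recurrence(xs, W):
    # Mirrors recurrent_layer(input=emb, bias_attr=False,
    # act=SoftmaxActivation()): h_t = softmax(x_t + W h_{t-1}).
    h = np.zeros(W.shape[0])
    states = []
    for x in xs:                  # one update per sequence position
        h = softmax(x + W @ h)
        states.append(h)
    return np.stack(states)

rng = np.random.default_rng(0)
xs = rng.normal(size=(4, 128))           # hypothetical 4-step sequence
W = 0.1 * rng.normal(size=(128, 128))    # the recurrent weight matrix
hs = simple_recurrence(xs, W)
last = hs[-1]                            # what last_seq(input=recurrent) selects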
paddle/gserver/tests/sequence_recurrent_group.py
new file mode 100644
#!/usr/bin/env python
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer_config_helpers import *

######################## data source ################################
dict_path = 'gserver/tests/Sequence/tour_dict_phrase.dict'
dict_file = dict()
for line_count, line in enumerate(open(dict_path, "r")):
    dict_file[line.strip()] = line_count

define_py_data_sources2(
    train_list='gserver/tests/Sequence/train.list',
    test_list=None,
    module='sequenceGen',
    obj='process',
    args={"dict_file": dict_file})

settings(batch_size=5)

######################## network configure ################################
dict_dim = len(open(dict_path, 'r').readlines())
word_dim = 128
hidden_dim = 128
label_dim = 3

# This config is designed to be equivalent with sequence_recurrent.py
data = data_layer(name="word", size=dict_dim)

emb = embedding_layer(
    input=data, size=word_dim, param_attr=ParamAttr(name="emb"))


def step(y):
    mem = memory(name="rnn_state", size=hidden_dim)
    with mixed_layer(
            name="rnn_state",
            size=hidden_dim,
            bias_attr=False,
            act=SoftmaxActivation()) as out:
        out += identity_projection(input=y)
        out += full_matrix_projection(
            input=mem, param_attr=ParamAttr(name="___recurrent_layer_0__"))
    return out


recurrent = recurrent_group(name="rnn", step=step, input=emb)

recurrent_last = last_seq(input=recurrent)

with mixed_layer(
        size=label_dim, act=SoftmaxActivation(), bias_attr=True) as output:
    output += full_matrix_projection(input=recurrent_last)

outputs(
    classification_cost(
        input=output, label=data_layer(name="label", size=1)))
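The step function spells out the same update that recurrent_layer performs implicitly: a memory named "rnn_state" carries the previous output, identity_projection passes the current input through, and full_matrix_projection applies the recurrent weight. Pinning that weight to the name ___recurrent_layer_0__ presumably matches the auto-generated parameter name from the plain recurrent_layer, so the two networks expose identical parameter sets for test_CompareTwoNets to compare. A self-contained numpy sketch of how recurrent_group threads the memory through the step (function names and shapes are illustrative only):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

def step(y_t, mem, W):
    # identity_projection(y) + full_matrix_projection(mem), then the
    # softmax activation of the "rnn_state" mixed layer.
    return softmax(y_t + W @ mem)

def recurrent_group_unroll(xs, W):
    # recurrent_group calls step once per position, feeding each output
    # back in as the "rnn_state" memory of the next call (zero boot state).
    h, out = np.zeros(W.shape[0]), []
    for x in xs:
        h = step(x, h, W)
        out.append(h)
    return np.stack(out)

rng = np.random.default_rng(1)
xs = rng.normal(size=(4, 8))         # toy sequence, hidden size 8
W = 0.1 * rng.normal(size=(8, 8))    # the shared "___recurrent_layer_0__" weight
hs = recurrent_group_unroll(xs, W)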
paddle/trainer/tests/test_CompareTwoNets.cpp → paddle/gserver/tests/test_CompareTwoNets.cpp
@@ -30,8 +30,6 @@ DECLARE_bool(use_gpu);
 DECLARE_string(config);
 DECLARE_string(nics);
-DEFINE_string(config_file_a, "", "config of one network to compare");
-DEFINE_string(config_file_b, "", "config of another network to compare");
 DEFINE_bool(need_high_accuracy, false, "whether need to run in double accuracy");

@@ -42,6 +40,10 @@ DEFINE_double(
 DECLARE_bool(thread_local_rand_use_global_seed);
 DECLARE_int32(seed);
+static const string& config_file_a = "gserver/tests/sequence_recurrent.py";
+static const string& config_file_b = "gserver/tests/sequence_recurrent_group.py";
 struct ComData {
   vector<Argument> outArgs;
   vector<ParameterPtr> parameters;

@@ -66,6 +68,7 @@ void calcGradient(ComData& data, const string configFile) {
   DataBatch dataBatch;
   int32_t batchSize = trainer.getConfig().opt_config().batch_size();
+  trainer.getDataProvider()->reset();
   trainer.getDataProvider()->setSkipShuffle();
   trainer.getDataProvider()->getNextBatch(batchSize, &dataBatch);

@@ -167,11 +170,11 @@ void compareGradient(ComData& comDataA, ComData& comDataB) {
 TEST(Trainer, create) {
   ComData dataA;
-  calcGradient(dataA, FLAGS_config_file_a);
+  calcGradient(dataA, config_file_a);
   LOG(INFO) << "\n\nforwardBackward of Network A is finished\n\n";

   ComData dataB;
-  calcGradient(dataB, FLAGS_config_file_b);
+  calcGradient(dataB, config_file_b);
   LOG(INFO) << "\n\nforwardBackward of the Network B is finished\n\n";

   compareGradient(dataA, dataB);
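With the command-line flags gone, the test always compares the two configs above: calcGradient runs one forward/backward pass per network, and compareGradient checks outputs and parameter gradients pairwise. A minimal numpy sketch of that comparison idea (the helper name and tolerance values here are hypothetical, not the ones in the C++ test):

import numpy as np

def compare_gradients(grads_a, grads_b, need_high_accuracy=False):
    # Pairwise comparison of parameter gradients from two networks,
    # in the spirit of compareGradient() in test_CompareTwoNets.cpp.
    tol = 1e-10 if need_high_accuracy else 1e-5   # hypothetical tolerances
    assert len(grads_a) == len(grads_b), "networks must share parameters"
    for i, (ga, gb) in enumerate(zip(grads_a, grads_b)):
        if not np.allclose(ga, gb, rtol=tol, atol=tol):
            raise AssertionError("gradient mismatch at parameter %d" % i)

# Toy usage: identical gradient sets pass the check.
grads = [np.ones((3, 3)), np.linspace(0.0, 1.0, 5)]
compare_gradients(grads, [g.copy() for g in grads])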
paddle/trainer/tests/CMakeLists.txt
@@ -28,14 +28,6 @@ if(WITH_PYTHON)
     ${PADDLE_SOURCE_DIR}/paddle/.set_port.sh -p port
     ${CMAKE_CURRENT_BINARY_DIR}/test_TrainerOnePass
     WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
 endif()
-################ test_CompareTwoNets ######################
-add_unittest_without_exec(test_CompareTwoNets
-    test_CompareTwoNets.cpp)
-add_test(NAME test_CompareTwoNets
-    COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d
-        ${PADDLE_SOURCE_DIR}/python/
-        ${CMAKE_CURRENT_BINARY_DIR}/test_CompareTwoNets
-        --config_file_a=trainer/tests/sample_trainer_config_qb_rnn.conf --config_file_b=trainer/tests/sample_trainer_config_rnn.conf
-    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
 ############### test_CompareTwoOpts ###################
 add_unittest_without_exec(test_CompareTwoOpts
paddle/trainer/tests/sample_trainer_config_qb_rnn.conf
deleted file mode 100644
#edit-mode: -*- python -*-
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#Todo(luotao02) This config is only used for unittest. It is out of date now, and will be updated later.

# Note: when making changes to this file, please make sure
# sample_trainer_config_rnn.conf is changed accordingly so that the unittest
# for comparing these two nets can pass (test_CompareTwoNets)

default_initial_std(0.1)
default_device(0)

word_dim = 1451594
l1 = 0
l2 = 0

model_type("nn")

sparse_update = get_config_arg("sparse_update", bool, False)

TrainData(ProtoData(
    type="proto_sequence",
    files=('trainer/tests/train.list'), ))

Settings(
    algorithm='sgd',
    batch_size=100,
    learning_rate=0.0001,
    learning_rate_decay_a=4e-08,
    learning_rate_decay_b=0.0,
    learning_rate_schedule='poly', )

wordvec_dim = 128
layer2_dim = 96
layer3_dim = 96
hidden_dim = 128

slot_names = ["qb", "qw", "tb", "tw"]


def ltr_network(network_name,
                word_dim=word_dim,
                wordvec_dim=wordvec_dim,
                layer2_dim=layer2_dim,
                layer3_dim=layer3_dim,
                hidden_dim=hidden_dim,
                slot_names=slot_names,
                l1=l1,
                l2=l2):
    slotnum = len(slot_names)
    for i in xrange(slotnum):
        Inputs(slot_names[i] + network_name)
    for i in xrange(slotnum):
        Layer(
            name=slot_names[i] + network_name,
            type="data",
            size=word_dim,
            device=-1, )
        Layer(
            name=slot_names[i] + "_embedding_" + network_name,
            type="mixed",
            size=wordvec_dim,
            bias=False,
            device=-1,
            inputs=TableProjection(
                slot_names[i] + network_name,
                parameter_name="embedding.w0",
                decay_rate_l1=l1,
                sparse_remote_update=True,
                sparse_update=sparse_update, ), )
        Layer(
            name=slot_names[i] + "_rnn1_" + network_name,
            type="recurrent",
            active_type="tanh",
            bias=Bias(initial_std=0, parameter_name="rnn1.bias"),
            inputs=Input(
                slot_names[i] + "_embedding_" + network_name,
                parameter_name="rnn1.w0"))
        Layer(
            name=slot_names[i] + "_rnnlast_" + network_name,
            type="seqlastins",
            inputs=[slot_names[i] + "_rnn1_" + network_name, ], )

    Layer(
        name="layer2_" + network_name,
        type="fc",
        active_type="tanh",
        size=layer2_dim,
        bias=Bias(parameter_name="layer2.bias"),
        inputs=[
            Input(
                slot_name + "_rnnlast_" + network_name,
                parameter_name="_layer2_" + slot_name + ".w",
                decay_rate=l2,
                initial_smart=True) for slot_name in slot_names
        ])
    Layer(
        name="layer3_" + network_name,
        type="fc",
        active_type="tanh",
        size=layer3_dim,
        bias=Bias(parameter_name="layer3.bias"),
        inputs=[
            Input(
                "layer2_" + network_name,
                parameter_name="_layer3.w",
                decay_rate=l2,
                initial_smart=True),
        ])
    Layer(
        name="output_" + network_name,
        type="fc",
        size=1,
        bias=False,
        inputs=[
            Input("layer3_" + network_name, parameter_name="_layerO.w"),
        ], )


ltr_network("left")
ltr_network("right")

Inputs("label")
Layer(
    name="label",
    type="data",
    size=1, )

Outputs("cost", "qb_rnnlast_left")
Layer(
    name="cost",
    type="rank-cost",
    inputs=["output_left", "output_right", "label"], )
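The final "rank-cost" layer scores the left branch against the right branch under a 0/1 preference label. For orientation, a RankNet-style pairwise rank cost looks like the sketch below; this is the textbook formulation, not necessarily the exact form Paddle's rank-cost layer implements:

import numpy as np

def rank_cost(output_left, output_right, label):
    # label is 1 if left should rank above right, 0 otherwise.
    # Cross-entropy on sigmoid(left - right), RankNet style.
    o = output_left - output_right
    p = 1.0 / (1.0 + np.exp(-o))        # P(left ranks above right)
    eps = 1e-12                          # guard against log(0)
    return -(label * np.log(p + eps) + (1 - label) * np.log(1 - p + eps))

# Toy example: left scores higher; a matching label gives a small cost.
print(rank_cost(2.0, 0.5, 1))   # ~0.20
print(rank_cost(2.0, 0.5, 0))   # ~1.70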
paddle/trainer/tests/sample_trainer_config_rnn.conf
deleted file mode 100644
#edit-mode: -*- python -*-
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#Todo(luotao02) This config is only used for unittest. It is out of date now, and will be updated later.

# Note: when making changes to this file, please make sure
# sample_trainer_config_qb_rnn.conf is changed accordingly so that the unittest
# for comparing these two nets can pass (test_CompareTwoNets)

default_initial_std(0.1)
default_device(0)

word_dim = 1451594
l1 = 0
l2 = 0

model_type("recurrent_nn")

sparse_update = get_config_arg("sparse_update", bool, False)

TrainData(ProtoData(
    type="proto_sequence",
    files=('trainer/tests/train.list'), ))

Settings(
    algorithm='sgd',
    batch_size=100,
    learning_rate=0.0001,
    learning_rate_decay_a=4e-08,
    learning_rate_decay_b=0.0,
    learning_rate_schedule='poly', )

wordvec_dim = 128
layer2_dim = 96
layer3_dim = 96
hidden_dim = 128

slot_names = ["qb", "qw", "tb", "tw"]


def SimpleRecurrentLayer(name,
                         size,
                         active_type,
                         bias,
                         input_layer_name,
                         parameter_name,
                         seq_reversed=False):
    RecurrentLayerGroupBegin(
        name + "_layer_group",
        in_links=[input_layer_name],
        out_links=[name],
        seq_reversed=seq_reversed)
    memory_name = Memory(name=name, size=size)
    Layer(
        name=name,
        type="mixed",
        size=size,
        active_type=active_type,
        bias=bias,
        inputs=[
            IdentityProjection(input_layer_name),
            FullMatrixProjection(
                memory_name,
                parameter_name=parameter_name, ),
        ])
    RecurrentLayerGroupEnd(name + "_layer_group")


def ltr_network(network_name,
                word_dim=word_dim,
                wordvec_dim=wordvec_dim,
                layer2_dim=layer2_dim,
                layer3_dim=layer3_dim,
                hidden_dim=hidden_dim,
                slot_names=slot_names,
                l1=l1,
                l2=l2):
    slotnum = len(slot_names)
    for i in xrange(slotnum):
        Inputs(slot_names[i] + network_name)
    for i in xrange(slotnum):
        Layer(
            name=slot_names[i] + network_name,
            type="data",
            size=word_dim,
            device=-1, )
        Layer(
            name=slot_names[i] + "_embedding_" + network_name,
            type="mixed",
            size=wordvec_dim,
            bias=False,
            device=-1,
            inputs=TableProjection(
                slot_names[i] + network_name,
                parameter_name="embedding.w0",
                decay_rate_l1=l1,
                sparse_remote_update=True,
                sparse_update=sparse_update, ), )
        SimpleRecurrentLayer(
            name=slot_names[i] + "_rnn1_" + network_name,
            size=hidden_dim,
            active_type="tanh",
            bias=Bias(initial_std=0, parameter_name="rnn1.bias"),
            input_layer_name=slot_names[i] + "_embedding_" + network_name,
            parameter_name="rnn1.w0", )
        Layer(
            name=slot_names[i] + "_rnnlast_" + network_name,
            type="seqlastins",
            inputs=[slot_names[i] + "_rnn1_" + network_name, ], )

    Layer(
        name="layer2_" + network_name,
        type="fc",
        active_type="tanh",
        size=layer2_dim,
        bias=Bias(parameter_name="layer2.bias"),
        inputs=[
            Input(
                slot_name + "_rnnlast_" + network_name,
                parameter_name="_layer2_" + slot_name + ".w",
                decay_rate=l2,
                initial_smart=True) for slot_name in slot_names
        ])
    Layer(
        name="layer3_" + network_name,
        type="fc",
        active_type="tanh",
        size=layer3_dim,
        bias=Bias(parameter_name="layer3.bias"),
        inputs=[
            Input(
                "layer2_" + network_name,
                parameter_name="_layer3.w",
                decay_rate=l2,
                initial_smart=True),
        ])
    Layer(
        name="output_" + network_name,
        type="fc",
        size=1,
        bias=False,
        inputs=[
            Input("layer3_" + network_name, parameter_name="_layerO.w"),
        ], )


ltr_network("left")
ltr_network("right")

Inputs("label")
Layer(
    name="label",
    type="data",
    size=1, )

Outputs("cost", "qb_rnnlast_left")
Layer(
    name="cost",
    type="rank-cost",
    inputs=["output_left", "output_right", "label"], )
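SimpleRecurrentLayer assembles the recurrence by hand inside a recurrent layer group: a Memory of the layer's own output feeds a FullMatrixProjection, an IdentityProjection passes the input through, and the mixed layer sums them under the chosen activation, optionally scanning the sequence in reverse. A minimal numpy sketch of that unrolling (the zero boot state and shapes are assumptions for illustration):

import numpy as np

def simple_recurrent(xs, W, b, seq_reversed=False):
    # Unrolls the SimpleRecurrentLayer group from this config:
    # h_t = tanh(x_t + W h_{t-1} + b), optionally scanning right-to-left.
    order = range(len(xs) - 1, -1, -1) if seq_reversed else range(len(xs))
    h = np.zeros(W.shape[0])          # assumed zero boot state for Memory
    out = [None] * len(xs)
    for t in order:
        h = np.tanh(xs[t] + W @ h + b)
        out[t] = h
    return np.stack(out)

rng = np.random.default_rng(2)
xs = rng.normal(size=(4, 128))        # hypothetical 4-step sequence
W = 0.1 * rng.normal(size=(128, 128))
b = np.zeros(128)                     # bias starts at zero (initial_std=0)
hs = simple_recurrent(xs, W, b)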