Commit e387cdba
Added Bidi-LSTM and DB-LSTM to quick_start demo (#226)

Authored on Oct 21, 2016 by alvations; committed by Yu Yang on Oct 21, 2016.
Parent commit: e1f57bfd

Showing 3 changed files with 137 additions and 0 deletions (+137 / -0):
demo/quick_start/train.sh (+2 / -0)
demo/quick_start/trainer_config.bidi-lstm.py (+62 / -0)
demo/quick_start/trainer_config.db-lstm.py (+73 / -0)
demo/quick_start/train.sh

```diff
@@ -18,6 +18,8 @@ cfg=trainer_config.lr.py
 #cfg=trainer_config.emb.py
 #cfg=trainer_config.cnn.py
 #cfg=trainer_config.lstm.py
+#cfg=trainer_config.bidi-lstm.py
+#cfg=trainer_config.db-lstm.py
 paddle train \
   --config=$cfg \
   --save_dir=./output \
```
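To train with one of the new networks, switch the `cfg` variable near the top of train.sh to the matching config file (only one `cfg=` line should be uncommented at a time) and rerun the script. A minimal sketch, assuming the demo's data has already been prepared under demo/quick_start as in the rest of the quick_start walkthrough:

```shell
cd demo/quick_start
# Edit train.sh so that the single active line reads, e.g.:
#   cfg=trainer_config.bidi-lstm.py
# then launch training:
./train.sh
```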
demo/quick_start/trainer_config.bidi-lstm.py (new file, mode 100644)

```python
# edit-mode: -*- python -*-
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer_config_helpers import *

dict_file = "./data/dict.txt"
word_dict = dict()
with open(dict_file, 'r') as f:
    for i, line in enumerate(f):
        w = line.strip().split()[0]
        word_dict[w] = i

is_predict = get_config_arg('is_predict', bool, False)
trn = 'data/train.list' if not is_predict else None
tst = 'data/test.list' if not is_predict else 'data/pred.list'
process = 'process' if not is_predict else 'process_predict'
define_py_data_sources2(
    train_list=trn,
    test_list=tst,
    module="dataprovider_emb",
    obj=process,
    args={"dictionary": word_dict})

batch_size = 128 if not is_predict else 1
settings(
    batch_size=batch_size,
    learning_rate=2e-3,
    learning_method=AdamOptimizer(),
    regularization=L2Regularization(8e-4),
    gradient_clipping_threshold=25)

bias_attr = ParamAttr(initial_std=0., l2_rate=0.)

data = data_layer(name="word", size=len(word_dict))
emb = embedding_layer(input=data, size=128)
bi_lstm = bidirectional_lstm(input=emb, size=128)
dropout = dropout_layer(input=bi_lstm, dropout_rate=0.5)
output = fc_layer(
    input=dropout, size=2, bias_attr=bias_attr, act=SoftmaxActivation())

if is_predict:
    maxid = maxid_layer(output)
    outputs([maxid, output])
else:
    label = data_layer(name="label", size=2)
    cls = classification_cost(input=output, label=label)
    outputs(cls)
```
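For readers skimming the config: the loader at the top expects each line of ./data/dict.txt to begin with the token itself; `split()[0]` discards any trailing fields (e.g. frequency counts), and the token is mapped to its zero-based line number. A minimal stand-alone sketch with made-up dictionary lines (the sample contents are hypothetical; only the parsing mirrors the config above):

```python
# Hypothetical dict.txt lines: token first, any extra fields ignored.
sample_lines = ["the\t218765\n", "of\t120934\n", "movie\t34567\n"]

word_dict = dict()
for i, line in enumerate(sample_lines):
    w = line.strip().split()[0]  # keep only the leading token
    word_dict[w] = i             # index = zero-based line number

assert word_dict == {"the": 0, "of": 1, "movie": 2}
```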
demo/quick_start/trainer_config.db-lstm.py (new file, mode 100644)

```python
# edit-mode: -*- python -*-
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer_config_helpers import *

dict_file = "./data/dict.txt"
word_dict = dict()
with open(dict_file, 'r') as f:
    for i, line in enumerate(f):
        w = line.strip().split()[0]
        word_dict[w] = i

is_predict = get_config_arg('is_predict', bool, False)
trn = 'data/train.list' if not is_predict else None
tst = 'data/test.list' if not is_predict else 'data/pred.list'
process = 'process' if not is_predict else 'process_predict'
define_py_data_sources2(
    train_list=trn,
    test_list=tst,
    module="dataprovider_emb",
    obj=process,
    args={"dictionary": word_dict})

batch_size = 128 if not is_predict else 1
settings(
    batch_size=batch_size,
    learning_rate=2e-3,
    learning_method=AdamOptimizer(),
    regularization=L2Regularization(8e-4),
    gradient_clipping_threshold=25)

bias_attr = ParamAttr(initial_std=0., l2_rate=0.)

data = data_layer(name="word", size=len(word_dict))
emb = embedding_layer(input=data, size=128)

hidden_0 = mixed_layer(size=128, input=[full_matrix_projection(input=emb)])
lstm_0 = lstmemory(input=hidden_0, layer_attr=ExtraAttr(drop_rate=0.1))

input_layers = [hidden_0, lstm_0]

for i in range(1, 8):
    fc = fc_layer(input=input_layers, size=128)
    lstm = lstmemory(
        input=fc,
        layer_attr=ExtraAttr(drop_rate=0.1),
        reverse=(i % 2) == 1)
    input_layers = [fc, lstm]

lstm_last = pooling_layer(input=lstm, pooling_type=MaxPooling())
output = fc_layer(
    input=lstm_last, size=2, bias_attr=bias_attr, act=SoftmaxActivation())

if is_predict:
    maxid = maxid_layer(output)
    outputs([maxid, output])
else:
    label = data_layer(name="label", size=2)
    cls = classification_cost(input=output, label=label)
    outputs(cls)
```
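The loop above is what makes this a deep bidirectional ("DB") stack: `lstm_0` runs forward, and each of the seven stacked `lstmemory` layers flips direction via `reverse=(i % 2) == 1`, so forward and reverse layers alternate up the stack. A minimal sketch in plain Python (no PaddlePaddle required) of the resulting direction pattern:

```python
# Direction of each LSTM layer in the stack built above.
directions = ["forward"]  # lstm_0: reverse defaults to False
for i in range(1, 8):
    directions.append("reverse" if (i % 2) == 1 else "forward")
print(directions)
# ['forward', 'reverse', 'forward', 'reverse',
#  'forward', 'reverse', 'forward', 'reverse']
```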