Commit beb90756
Authored Jan 24, 2018 by fengjiayi; committed by peterzhang2029 on Jan 24, 2018

Merge pull request #7824 from JiayiFeng/fix_bug

fix a bug

Parents: 1ab11814, ef9098a3

Showing 2 changed files, with 203 additions and 1 deletion:

    python/paddle/v2/fluid/backward.py                                              +1   -1
    python/paddle/v2/fluid/tests/book/test_machine_translation_encoder_context.py  +202  -0
python/paddle/v2/fluid/backward.py @ beb90756

@@ -178,7 +178,7 @@ def _remove_no_grad_branch_(op_descs, no_grad_set):
         if _all_in_set_(
                 filter(lambda name: name.find(core.grad_var_suffix()) != -1,
                        op_desc.input_arg_names()), no_grad_set):
-            no_grad_set.union(out_arg_names)
+            no_grad_set.update(out_arg_names)
             return True
         return False
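The whole fix is the one-line change above: `set.union` returns a new set and leaves the receiver unchanged, so the old code silently discarded the result and `no_grad_set` never gained the op's output names; `set.update` adds them in place. A minimal standalone sketch of the difference (the sample values are illustrative, not taken from the Paddle source):

```python
# Sketch of the bug class fixed above: set.union vs. set.update.
# Values are made up for illustration only.
no_grad_set = {"x@GRAD"}
out_arg_names = ["y@GRAD", "z@GRAD"]

no_grad_set.union(out_arg_names)   # builds a NEW set; result is dropped
print(no_grad_set)                 # {'x@GRAD'} -- unchanged, the old behavior

no_grad_set.update(out_arg_names)  # mutates no_grad_set in place
print(no_grad_set)                 # {'x@GRAD', 'y@GRAD', 'z@GRAD'}
```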
python/paddle/v2/fluid/tests/book/test_machine_translation_encoder_context.py (new file, mode 100644) @ beb90756
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.v2.fluid.core as core
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor

dict_size = 30000
source_dict_dim = target_dict_dim = dict_size
src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size)
hidden_dim = 32
embedding_dim = 16
batch_size = 10
max_length = 50
topk_size = 50
encoder_size = decoder_size = hidden_dim

IS_SPARSE = True
USE_PEEPHOLES = False


def bi_lstm_encoder(input_seq, hidden_size):
    input_forward_proj = fluid.layers.fc(input=input_seq,
                                         size=hidden_size * 4,
                                         bias_attr=True)
    forward, _ = fluid.layers.dynamic_lstm(
        input=input_forward_proj,
        size=hidden_size * 4,
        use_peepholes=USE_PEEPHOLES)
    input_backward_proj = fluid.layers.fc(input=input_seq,
                                          size=hidden_size * 4,
                                          bias_attr=True)
    backward, _ = fluid.layers.dynamic_lstm(
        input=input_backward_proj,
        size=hidden_size * 4,
        is_reverse=True,
        use_peepholes=USE_PEEPHOLES)
    return forward, backward


# FIXME(peterzhang2029): Replace this function with the lstm_unit_op.
def lstm_step(x_t, hidden_t_prev, cell_t_prev, size):
    def linear(inputs):
        return fluid.layers.fc(input=inputs, size=size, bias_attr=True)

    forget_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
    input_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
    output_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
    cell_tilde = fluid.layers.tanh(x=linear([hidden_t_prev, x_t]))

    cell_t = fluid.layers.sums(input=[
        fluid.layers.elementwise_mul(
            x=forget_gate, y=cell_t_prev), fluid.layers.elementwise_mul(
                x=input_gate, y=cell_tilde)
    ])

    hidden_t = fluid.layers.elementwise_mul(
        x=output_gate, y=fluid.layers.tanh(x=cell_t))

    return hidden_t, cell_t


def lstm_decoder_without_attention(target_embedding, decoder_boot, context,
                                   decoder_size):
    rnn = fluid.layers.DynamicRNN()

    cell_init = fluid.layers.fill_constant_batch_size_like(
        input=decoder_boot,
        value=0.0,
        shape=[-1, decoder_size],
        dtype='float32')
    cell_init.stop_gradient = False

    with rnn.block():
        current_word = rnn.step_input(target_embedding)
        context = rnn.static_input(context)

        hidden_mem = rnn.memory(init=decoder_boot, need_reorder=True)
        cell_mem = rnn.memory(init=cell_init)
        decoder_inputs = fluid.layers.concat(
            input=[context, current_word], axis=1)
        h, c = lstm_step(decoder_inputs, hidden_mem, cell_mem, decoder_size)
        rnn.update_memory(hidden_mem, h)
        rnn.update_memory(cell_mem, c)
        out = fluid.layers.fc(input=h,
                              size=target_dict_dim,
                              bias_attr=True,
                              act='softmax')
        rnn.output(out)

    return rnn()


def seq_to_seq_net():
    """Construct a seq2seq network."""
    src_word_idx = fluid.layers.data(
        name='source_sequence', shape=[1], dtype='int64', lod_level=1)

    src_embedding = fluid.layers.embedding(
        input=src_word_idx,
        size=[source_dict_dim, embedding_dim],
        dtype='float32')

    src_forward, src_backward = bi_lstm_encoder(
        input_seq=src_embedding, hidden_size=encoder_size)

    encoded_vector = fluid.layers.concat(
        input=[src_forward, src_backward], axis=1)

    enc_vec_last = fluid.layers.sequence_last_step(input=encoded_vector)

    decoder_boot = fluid.layers.fc(input=enc_vec_last,
                                   size=decoder_size,
                                   bias_attr=False,
                                   act='tanh')

    trg_word_idx = fluid.layers.data(
        name='target_sequence', shape=[1], dtype='int64', lod_level=1)

    trg_embedding = fluid.layers.embedding(
        input=trg_word_idx,
        size=[target_dict_dim, embedding_dim],
        dtype='float32')

    prediction = lstm_decoder_without_attention(trg_embedding, decoder_boot,
                                                enc_vec_last, decoder_size)
    label = fluid.layers.data(
        name='label_sequence', shape=[1], dtype='int64', lod_level=1)
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = fluid.layers.mean(x=cost)

    return avg_cost


def to_lodtensor(data, place):
    seq_lens = [len(seq) for seq in data]
    cur_len = 0
    lod = [cur_len]
    for l in seq_lens:
        cur_len += l
        lod.append(cur_len)
    flattened_data = np.concatenate(data, axis=0).astype("int64")
    flattened_data = flattened_data.reshape([len(flattened_data), 1])
    res = core.LoDTensor()
    res.set(flattened_data, place)
    res.set_lod([lod])
    return res


def main():
    avg_cost = seq_to_seq_net()

    optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4)
    optimizer.minimize(avg_cost)

    train_data = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.wmt14.train(dict_size), buf_size=1000),
        batch_size=batch_size)

    place = core.CPUPlace()
    exe = Executor(place)

    exe.run(framework.default_startup_program())

    batch_id = 0
    for pass_id in xrange(2):
        for data in train_data():
            word_data = to_lodtensor(map(lambda x: x[0], data), place)
            trg_word = to_lodtensor(map(lambda x: x[1], data), place)
            trg_word_next = to_lodtensor(map(lambda x: x[2], data), place)
            outs = exe.run(framework.default_main_program(),
                           feed={
                               'source_sequence': word_data,
                               'target_sequence': trg_word,
                               'label_sequence': trg_word_next
                           },
                           fetch_list=[avg_cost])
            avg_cost_val = np.array(outs[0])
            print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
                  " avg_cost=" + str(avg_cost_val))
            if batch_id > 3:
                exit(0)
            batch_id += 1


if __name__ == '__main__':
    main()
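One detail of the new test worth calling out is `to_lodtensor`, which packs a Python list of variable-length sequences into a Fluid `LoDTensor`: it flattens the data and records cumulative sequence offsets as the LoD. A small standalone trace of that offset arithmetic, using made-up input rather than data from the test:

```python
import numpy as np

# Toy trace of the offset computation inside to_lodtensor (illustrative input).
data = [[1, 2, 3], [4, 5]]  # two sequences, lengths 3 and 2
lod, cur_len = [0], 0
for l in [len(seq) for seq in data]:
    cur_len += l
    lod.append(cur_len)  # cumulative offsets: sequence i spans lod[i]:lod[i+1]
print(lod)  # [0, 3, 5]

flattened = np.concatenate(data, axis=0).astype("int64").reshape([-1, 1])
print(flattened.shape)  # (5, 1) -- one token id per row, as the feed expects
```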