Commit a9159a8d
Authored Mar 09, 2018 by guosheng

Add initializer for Transformer.

Parent: df8060e7
Showing 2 changed files with 50 additions and 24 deletions (+50 -24):

fluid/neural_machine_translation/transformer/model.py  (+48 -22)
fluid/neural_machine_translation/transformer/train.py  (+2 -2)
fluid/neural_machine_translation/transformer/model.py
 from functools import partial
 import numpy as np
-import paddle.v2 as paddle
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
...
@@ -31,7 +30,7 @@ def multi_head_attention(queries,
                          d_key,
                          d_value,
                          d_model,
-                         num_heads=1,
+                         n_head=1,
                          dropout_rate=0.):
     """
     Multi-Head Attention. Note that attn_bias is added to the logit before
...
@@ -42,41 +41,53 @@ def multi_head_attention(queries,
         raise ValueError(
             "Inputs: quries, keys and values should all be 3-D tensors.")
 
-    def __compute_qkv(queries, keys, values, num_heads, d_key, d_value):
+    def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
         """
         Add linear projection to queries, keys, and values.
         """
         q = layers.fc(input=queries,
-                      size=d_key * num_heads,
+                      size=d_key * n_head,
+                      param_attr=fluid.initializer.Xavier(
+                          uniform=False,
+                          fan_in=d_model * d_key,
+                          fan_out=n_head * d_key),
                       bias_attr=False,
                       num_flatten_dims=2)
         k = layers.fc(input=keys,
-                      size=d_key * num_heads,
+                      size=d_key * n_head,
+                      param_attr=fluid.initializer.Xavier(
+                          uniform=False,
+                          fan_in=d_model * d_key,
+                          fan_out=n_head * d_key),
                       bias_attr=False,
                       num_flatten_dims=2)
         v = layers.fc(input=values,
-                      size=d_value * num_heads,
+                      size=d_value * n_head,
+                      param_attr=fluid.initializer.Xavier(
+                          uniform=False,
+                          fan_in=d_model * d_value,
+                          fan_out=n_head * d_value),
                       bias_attr=False,
                       num_flatten_dims=2)
         return q, k, v
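Note: fluid.initializer.Xavier with uniform=False draws weights from a zero-mean normal whose scale is derived from the fan_in/fan_out passed above. A minimal numpy sketch of the implied statistics (not part of the commit; sizes are illustrative):

import numpy as np

# Glorot/Xavier normal: std = sqrt(2 / (fan_in + fan_out))
d_model, d_key, n_head = 512, 64, 8
fan_in = d_model * d_key       # as passed to Xavier above
fan_out = n_head * d_key
std = np.sqrt(2.0 / (fan_in + fan_out))
w_q = np.random.normal(0.0, std, size=(d_model, n_head * d_key))
print(std)  # ~0.0078 for these illustrative sizes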
 
-    def __split_heads(x, num_heads):
+    def __split_heads(x, n_head):
         """
         Reshape the last dimension of inpunt tensor x so that it becomes two
         dimensions and then transpose. Specifically, input a tensor with shape
-        [bs, max_sequence_length, num_heads * hidden_dim] then output a tensor
-        with shape [bs, num_heads, max_sequence_length, hidden_dim].
+        [bs, max_sequence_length, n_head * hidden_dim] then output a tensor
+        with shape [bs, n_head, max_sequence_length, hidden_dim].
         """
-        if num_heads == 1:
+        if n_head == 1:
             return x
 
         hidden_size = x.shape[-1]
         # FIXME(guosheng): Decouple the program desc with batch_size.
         reshaped = layers.reshape(
-            x=x, shape=[batch_size, -1, num_heads, hidden_size // num_heads])
+            x=x, shape=[batch_size, -1, n_head, hidden_size // n_head])
 
         # permuate the dimensions into:
-        # [batch_size, num_heads, max_sequence_len, hidden_size_per_head]
+        # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
         return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])
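Note: a minimal numpy sketch of the __split_heads reshape/transpose (not part of the commit; sizes are illustrative):

import numpy as np

bs, seq_len, n_head, head_dim = 2, 5, 8, 64
x = np.zeros((bs, seq_len, n_head * head_dim))
reshaped = x.reshape(bs, seq_len, n_head, head_dim)  # split the last dim
split = reshaped.transpose(0, 2, 1, 3)               # perm=[0, 2, 1, 3]
print(split.shape)  # (2, 8, 5, 64): [bs, n_head, seq_len, head_dim]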
 
     def __combine_heads(x):
...
@@ -95,7 +106,7 @@ def multi_head_attention(queries,
             shape=map(int,
                       [batch_size, -1, trans_x.shape[2] * trans_x.shape[3]]))
 
-    def scaled_dot_product_attention(q, k, v, attn_bias, d_key, dropout_rate):
+    def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate):
         """
         Scaled Dot-Product Attention
         """
...
@@ -114,7 +125,7 @@ def multi_head_attention(queries,
             sum_out = layers.reduce_sum(exp_out, dim=-1, keep_dim=False)
             return layers.elementwise_div(x=exp_out, y=sum_out, axis=0)
 
-        scaled_q = layers.scale(x=q, scale=d_key**-0.5)
+        scaled_q = layers.scale(x=q, scale=d_model**-0.5)
         product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
         weights = __softmax(layers.elementwise_add(x=product, y=attn_bias))
         if dropout_rate:
...
@@ -123,13 +134,13 @@ def multi_head_attention(queries,
         out = layers.matmul(weights, v)
         return out
 
-    q, k, v = __compute_qkv(queries, keys, values, num_heads, d_key, d_value)
+    q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
 
-    q = __split_heads(q, num_heads)
-    k = __split_heads(k, num_heads)
-    v = __split_heads(v, num_heads)
+    q = __split_heads(q, n_head)
+    k = __split_heads(k, n_head)
+    v = __split_heads(v, n_head)
 
-    ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_key,
+    ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_model,
                                                   dropout_rate)
 
     out = __combine_heads(ctx_multiheads)
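Note: with this rename the call site passes d_model rather than d_key, so the logits are scaled by d_model**-0.5. A minimal numpy sketch of the attention computation as wired above (not part of the commit; shapes are illustrative):

import numpy as np

def sdp_attention(q, k, v, attn_bias, d_model):
    scaled_q = q * d_model ** -0.5                 # layers.scale
    logits = scaled_q @ k.transpose(0, 1, 3, 2)    # matmul with transpose_y
    logits = logits + attn_bias                    # bias added before softmax
    weights = np.exp(logits - logits.max(-1, keepdims=True))
    weights = weights / weights.sum(-1, keepdims=True)  # __softmax
    return weights @ v

q = k = v = np.random.rand(2, 8, 5, 64)  # [bs, n_head, seq_len, head_dim]
out = sdp_attention(q, k, v, np.zeros((2, 8, 5, 5)), d_model=512)
print(out.shape)  # (2, 8, 5, 64)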
...
@@ -137,6 +148,7 @@ def multi_head_attention(queries,
     # Project back to the model size.
     proj_out = layers.fc(input=out,
                          size=d_model,
+                         param_attr=fluid.initializer.Xavier(uniform=False),
                          bias_attr=False,
                          num_flatten_dims=2)
     return proj_out
...
@@ -151,8 +163,14 @@ def positionwise_feed_forward(x, d_inner_hid, d_hid):
     hidden = layers.fc(input=x,
                        size=d_inner_hid,
                        num_flatten_dims=2,
+                       param_attr=fluid.initializer.Uniform(
+                           low=-(d_hid**-0.5), high=(d_hid**-0.5)),
                        act="relu")
-    out = layers.fc(input=hidden, size=d_hid, num_flatten_dims=2)
+    out = layers.fc(input=hidden,
+                    size=d_hid,
+                    num_flatten_dims=2,
+                    param_attr=fluid.initializer.Uniform(
+                        low=-(d_inner_hid**-0.5), high=(d_inner_hid**-0.5)))
     return out
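Note: these Uniform bounds follow the 1/sqrt(fan_in) convention: each fc draws its weights from U(-fan_in**-0.5, fan_in**-0.5), where fan_in is the width feeding that layer. A minimal sketch (not part of the commit; sizes are illustrative):

import numpy as np

d_hid, d_inner_hid = 512, 2048
bound1 = d_hid ** -0.5        # ~0.0442, for the expanding fc
bound2 = d_inner_hid ** -0.5  # ~0.0221, for the contracting fc
w1 = np.random.uniform(-bound1, bound1, size=(d_hid, d_inner_hid))
w2 = np.random.uniform(-bound2, bound2, size=(d_inner_hid, d_hid))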
...
@@ -168,7 +186,11 @@ def pre_post_process_layer(prev_out, out, process_cmd, dropout=0.):
         if cmd == "a":  # add residual connection
             out = out + prev_out if prev_out else out
         elif cmd == "n":  # add layer normalization
-            out = layers.layer_norm(out, begin_norm_axis=len(out.shape) - 1)
+            out = layers.layer_norm(
+                out,
+                begin_norm_axis=len(out.shape) - 1,
+                param_attr=fluid.initializer.Constant(1.),
+                bias_attr=fluid.initializer.Constant(0.))
         elif cmd == "d":  # add dropout
             if dropout:
                 out = layers.dropout(out, dropout_prob=dropout, is_test=False)
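Note: Constant(1.) for the scale and Constant(0.) for the bias start layer normalization as a pure normalization over the last axis. A minimal numpy sketch of the operation (not part of the commit):

import numpy as np

def layer_norm(x, scale, bias, eps=1e-5):
    mean = x.mean(-1, keepdims=True)
    var = x.var(-1, keepdims=True)
    return scale * (x - mean) / np.sqrt(var + eps) + bias

d_model = 512
scale = np.ones(d_model)   # param_attr: Constant(1.)
bias = np.zeros(d_model)   # bias_attr: Constant(0.)
y = layer_norm(np.random.rand(2, 5, d_model), scale, bias)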
...
@@ -195,7 +217,10 @@ def prepare_encoder(src_word,
     This module is used at the bottom of the encoder stacks.
     """
     src_word_emb = layers.embedding(
-        src_word, size=[src_vocab_size, src_emb_dim], padding_idx=src_pad_idx)
+        src_word,
+        size=[src_vocab_size, src_emb_dim],
+        padding_idx=src_pad_idx,
+        param_attr=fluid.initializer.Normal(0., 1.))
     src_pos_enc = layers.embedding(
         src_pos,
         size=[src_max_len, src_emb_dim],
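Note: the source word-embedding table is now initialized from a standard normal, N(0, 1). A minimal sketch of the lookup this table parameterizes (not part of the commit; sizes are illustrative):

import numpy as np

src_vocab_size, src_emb_dim = 10000, 512
emb_table = np.random.normal(0., 1., size=(src_vocab_size, src_emb_dim))
src_word = np.array([3, 42, 7])      # token ids
src_word_emb = emb_table[src_word]   # lookup, shape (3, 512)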
...
@@ -462,6 +487,7 @@ def transformer(
     predict = layers.reshape(
         x=layers.fc(input=dec_output,
                     size=trg_vocab_size,
+                    param_attr=fluid.initializer.Xavier(uniform=False),
                     bias_attr=False,
                     num_flatten_dims=2),
         shape=[-1, trg_vocab_size],
...
fluid/neural_machine_translation/transformer/train.py
...
@@ -115,7 +115,7 @@ def main():
             paddle.reader.shuffle(
                 paddle.dataset.wmt16.train(ModelHyperParams.src_vocab_size,
                                            ModelHyperParams.trg_vocab_size),
-                buf_size=51200),
+                buf_size=100000),
             batch_size=TrainTaskConfig.batch_size)
 
     # Initialize the parameters.
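Note: the shuffle buffer grows from 51200 to 100000 samples; a larger buffer shuffles more of the dataset at once at the cost of memory. A minimal sketch of what a buffered shuffle reader does conceptually (not paddle.reader's actual implementation):

import random

def shuffle_reader(reader, buf_size):
    def new_reader():
        buf = []
        for sample in reader():
            buf.append(sample)
            if len(buf) >= buf_size:
                random.shuffle(buf)
                for s in buf:
                    yield s
                buf = []
        random.shuffle(buf)  # flush the remainder
        for s in buf:
            yield s
    return new_reader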
...
@@ -143,7 +143,7 @@ def main():
                             fetch_list=[cost])
             cost_val = np.array(outs[0])
             print("pass_id = " + str(pass_id) + " batch = " + str(batch_id) +
-                  " avg_cost = " + str(cost_val))
+                  " cost = " + str(cost_val))
 
 if __name__ == "__main__":
...