Unverified commit 223fb7b3, authored by Yiqun Liu, committed by GitHub

Fix code example of fused_attention and fused_feedforward. (#43635)

Parent 4aac90ef
@@ -386,13 +386,15 @@ class FusedAttentionOpMaker : public framework::OpProtoAndCheckerMaker {
         .SetDefault(-1);
     AddComment(R"DOC(
-    Add fused attention op whose logic is as follows:
-    // @input: [batch_size, seq_len, 3, num_head, head_dim]
+    The fused_attention operator is the same as the following pseudo code:
+
+    // @input: [batch_size, seq_len, embed_dim]
     // @final_out: [batch_size, seq_len, num_heads, head_dim]
+    residual = input
     if (pre_layernorm)
-        out = layer_norm(input);
-    out = compute_qkv(out) + bias;
+        query = layer_norm(input);
+    out = compute_qkv(query) + qkv_bias;
     // fmha module
     {
         out = transpose(out, perm=[2, 0, 3, 1, 4]);
         out = q * k^t;
@@ -403,11 +405,14 @@ class FusedAttentionOpMaker : public framework::OpProtoAndCheckerMaker {
         out = transpose(out, perm=[0, 2, 1, 3]);
     }
-    out = out_linear(out);
-    if (pre_layernorm)
-        final_out = residual + dropout(bias + out);
-    else
-        final_out = layer_norm(residual + dropout(bias + out));
+    // out linear
+    out = linear(out);
+    if (add_residual)
+        out = residual + dropout(out);
+    else
+        out = dropout(out);
+    if (!pre_layernorm)
+        out = layer_norm(out);
     )DOC");
   }
 };
...
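To make the new pseudo code concrete, here is a minimal, unfused reference sketch written with standard paddle ops. It is illustrative only: the helper name attention_reference, the assumed layouts ([3, num_heads, head_dim, embed_dim] for qkv_weight, [3, num_heads, head_dim] for qkv_bias), and the keyword defaults are taken from the Python docstrings later in this diff, not from the C++ operator itself, and one dropout rate is reused for both the attention probabilities and the output.

import paddle
import paddle.nn.functional as F

def attention_reference(x, qkv_weight, qkv_bias, linear_weight, linear_bias,
                        ln_scale=None, ln_bias=None, attn_mask=None,
                        pre_layernorm=True, add_residual=True, dropout_rate=0.0):
    # x: [batch_size, seq_len, embed_dim]
    # qkv_weight (assumed layout): [3, num_heads, head_dim, embed_dim]
    # qkv_bias (assumed layout): [3, num_heads, head_dim]
    batch_size, seq_len, embed_dim = x.shape
    num_heads, head_dim = qkv_weight.shape[1], qkv_weight.shape[2]

    residual = x
    query = F.layer_norm(x, [embed_dim], ln_scale, ln_bias) if pre_layernorm else x

    # compute_qkv(query) + qkv_bias -> [batch, seq, 3, num_heads, head_dim]
    qkv = paddle.matmul(query, qkv_weight.reshape([-1, embed_dim]), transpose_y=True)
    qkv = qkv.reshape([batch_size, seq_len, 3, num_heads, head_dim]) + qkv_bias

    # fmha module
    qkv = paddle.transpose(qkv, perm=[2, 0, 3, 1, 4])  # [3, batch, heads, seq, head_dim]
    q, k, v = qkv[0], qkv[1], qkv[2]
    scores = paddle.matmul(q * head_dim ** -0.5, k, transpose_y=True)
    if attn_mask is not None:
        scores = scores + attn_mask
    probs = F.dropout(F.softmax(scores, axis=-1), dropout_rate)
    out = paddle.matmul(probs, v)                       # [batch, heads, seq, head_dim]
    out = paddle.transpose(out, perm=[0, 2, 1, 3]).reshape([batch_size, seq_len, embed_dim])

    # out linear, residual add and (post) layer_norm
    out = F.dropout(paddle.matmul(out, linear_weight) + linear_bias, dropout_rate)
    out = residual + out if add_residual else out
    if not pre_layernorm:
        out = F.layer_norm(out, [embed_dim], ln_scale, ln_bias)
    return out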
@@ -198,17 +198,25 @@ class FusedFeedForwardOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<int>("ring_id", "ring id for tensor model parallel.")
         .SetDefault(-1);
     AddComment(R"DOC(
-    the function of fused_feedforward operator is the same as the following pseudo code:
-    residual = src;
-    ln1_out = src;
-    if(pre_layer_norm){
-        ln1_out = layer_norm(src);
-    }
-    out = linear(dropout(activation(dropout(linear(ln1_out)))));
-    if(!pre_layer_norm) {
-        out = layer_norm(out);
-    }
+    The fused_feedforward operator is the same as the following pseudo code:
+
+    residual = src;
+    if (pre_layer_norm)
+        ln1_out = layer_norm(src);
+    else
+        ln1_out = src;
+    // linear 1
+    out = linear(ln1_out);
+    out = dropout(activation(out));
+    // linear 2
+    out = linear(out);
+    if (add_residual)
+        out = residual + dropout(out);
+    else
+        out = dropout(out);
+    if (!pre_layer_norm)
+        out = layer_norm(out);
     )DOC");
   }
 };
...
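Similarly, a hedged, unfused reference of the fused_feedforward pseudo code above, using standard paddle ops. The helper name feedforward_reference, the relu activation, and the reuse of a single layer_norm scale/bias for both the pre and post positions are simplifying assumptions, not part of the operator's actual interface.

import paddle
import paddle.nn.functional as F

def feedforward_reference(src, linear1_weight, linear1_bias, linear2_weight, linear2_bias,
                          ln_scale=None, ln_bias=None, pre_layer_norm=True,
                          add_residual=True, dropout1_rate=0.0, dropout2_rate=0.0):
    # src: [batch_size, seq_len, d_model]
    d_model = src.shape[-1]
    residual = src
    ln1_out = F.layer_norm(src, [d_model], ln_scale, ln_bias) if pre_layer_norm else src

    # linear 1 -> activation -> dropout
    out = paddle.matmul(ln1_out, linear1_weight) + linear1_bias
    out = F.dropout(F.relu(out), dropout1_rate)
    # linear 2
    out = paddle.matmul(out, linear2_weight) + linear2_bias

    # residual add and (post) layer_norm
    out = F.dropout(out, dropout2_rate)
    out = residual + out if add_residual else out
    if not pre_layer_norm:
        out = F.layer_norm(out, [d_model], ln_scale, ln_bias)
    return out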
@@ -55,12 +55,19 @@ def fused_feedforward(x,
     .. code-block:: python

-        residual = src;
+        residual = x
         if pre_layer_norm:
-            src = layer_norm(src)
-        src = linear(dropout(activation(dropout(linear(src)))))
+            out = layer_norm1(x)
+        else:
+            out = x
+        out = linear2(dropout1(activation(linear1(out))))
+        if add_residual:
+            out = residual + dropout2(out)
+        else:
+            out = dropout2(out)
         if not pre_layer_norm:
-            src = layer_norm(out)
+            out = layer_norm2(out)

     Args:
         x (Tensor): the input tensor could be 3-D tensor, the input data type could be float16, float32 or float64, the shape is `[batch\_size, sequence\_length, d_model]`.
@@ -102,15 +109,13 @@ def fused_feedforward(x,
             # required: gpu
             import paddle
-            import numpy as np
-            x_data = np.random.random((1, 8, 8)).astype("float32")
-            linear1_weight_data = np.random.random((8, 8)).astype("float32")
-            linear2_weight_data = np.random.random((8, 8)).astype("float32")
-            x = paddle.to_tensor(x_data)
-            linear1_weight = paddle.to_tensor(linear1_weight_data)
-            linear2_weight = paddle.to_tensor(linear2_weight_data)
-            out = paddle.incubate.nn.functional.fused_feedforward(x, linear1_weight, linear2_weight)
-            print(out.numpy().shape)
+            import paddle.incubate.nn.functional as F
+
+            x = paddle.randn(shape=(1, 8, 8), dtype="float32")
+            linear1_weight = paddle.randn(shape=(8, 8), dtype="float32")
+            linear2_weight = paddle.randn(shape=(8, 8), dtype="float32")
+            out = F.fused_feedforward(x, linear1_weight, linear2_weight)
+            print(out.shape)
             # (1, 8, 8)
     """
     _verify_dropout_rate(dropout1_rate)
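The updated example exercises only the required arguments. A slightly fuller call might look like the sketch below; the keyword names pre_layer_norm, dropout1_rate, dropout2_rate, and activation are assumptions inferred from the pseudo code and the _verify_dropout_rate(dropout1_rate) check above, so verify them against the released signature before relying on them.

# required: gpu
import paddle
import paddle.incubate.nn.functional as F

x = paddle.randn(shape=(2, 16, 64), dtype="float32")
linear1_weight = paddle.randn(shape=(64, 256), dtype="float32")   # d_model -> d_ffn
linear2_weight = paddle.randn(shape=(256, 64), dtype="float32")   # d_ffn -> d_model

# Keyword names below are assumptions inferred from the pseudo code above.
out = F.fused_feedforward(x, linear1_weight, linear2_weight,
                          dropout1_rate=0.1, dropout2_rate=0.1,
                          activation="relu", pre_layer_norm=True)
print(out.shape)  # [2, 16, 64]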
@@ -392,27 +397,34 @@ def fused_multi_head_attention(x,
     .. code-block:: python

-        if pre_layer_norm:
-            out = layer_norm(x)
-            out = linear(out) + qkv) + bias
-        else:
-            out = linear(x) + bias
-        out = transpose(out, perm=[2, 0, 3, 1, 4])
-        # extract q, k and v from out.
-        q = out[0:1,::]
-        k = out[1:2,::]
-        v = out[2:3,::]
-        out = q * k^t
-        out = attn_mask + out
-        out = softmax(out)
-        out = dropout(out)
-        out = out * v
-        out = transpose(out, perm=[0, 2, 1, 3])
-        out = out_linear(out)
-        if pre_layer_norm:
-            out = x + dropout(linear_bias + out)
-        else:
-            out = layer_norm(x + dropout(linear_bias + out))
+        residual = x
+        if pre_layer_norm:
+            out = layer_norm(x)
+        else:
+            out = x
+        # compute q, k, v
+        out = matmul(out, qkv_weight) + qkv_bias
+        out = transpose(out, perm=[2, 0, 3, 1, 4])
+        # extract q, k and v from out
+        q = out[0:1,::] * (head_dim ** -0.5)
+        k = out[1:2,::]
+        v = out[2:3,::]
+        out = matmul(q, k, transpose_y=True)
+        out = out + attn_mask
+        out = softmax(out)
+        out = dropout(out)
+        out = matmul(out, v)
+        # combine heads
+        out = transpose(out, perm=[0, 2, 1, 3])
+        # project to output
+        out = linear(out)
+        if add_residual:
+            out = residual + dropout(out)
+        else:
+            out = dropout(out)
+        if not pre_layer_norm:
+            out = layer_norm(out)

     Parameters:
         x (Tensor): The input tensor of fused_multi_head_attention. The shape is
@@ -420,7 +432,7 @@ def fused_multi_head_attention(x,
         qkv_weight (Tensor): The qkv weight tensor. The shape is `[3, num_head, dim_head, dim_embed]`.
         linear_weight (Tensor): The linear weight tensor. The shape is `[embed_dim, embed_dim]`.
         pre_layer_norm (bool, optional): whether it is pre_layer_norm (True) or post_layer_norm architecture
            (False). Default False.
         pre_ln_scale (Tensor, optional): The weight tensor of pre layernorm. Default None.
         pre_ln_bias (Tensor, optional): The bias tensor of pre layernorm. Default None.
         ln_scale (Tensor, optional): The weight tensor of layernorm. Default None.
@@ -432,7 +444,7 @@ def fused_multi_head_attention(x,
         linear_bias (Tensor, optional): The bias of linear. The shape is `[embed_dim]`. Default None.
         cache_kv (Tensor, optional): For generation model, cache structure. The shape is `[2, bsz, num_head, seq_len, head_dim]`. Default None.
         attn_mask (Tensor, optional): A tensor used in multi-head attention to prevent attention to
            some unwanted positions, usually the paddings or the subsequent positions. It is a tensor
            with shape broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`. When the
            data type is bool, the unwanted positions have `False` values and the others have `True` values.
            When the data type is int, the unwanted positions have 0 values and the others have 1 values.
...
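For symmetry with the fused_feedforward example earlier in this diff, here is a minimal usage sketch of fused_multi_head_attention. Only the three positional arguments listed under Parameters are used; the tensor shapes follow the documented layouts, the concrete sizes are illustrative, and the call itself is an assumption to check against the released API.

# required: gpu
import paddle
import paddle.incubate.nn.functional as F

embed_dim, num_head, head_dim = 128, 4, 32   # embed_dim == num_head * head_dim
x = paddle.randn(shape=(2, 4, embed_dim), dtype="float32")
# qkv_weight layout per the Parameters section: [3, num_head, dim_head, dim_embed]
qkv_weight = paddle.randn(shape=(3, num_head, head_dim, embed_dim), dtype="float32")
linear_weight = paddle.randn(shape=(embed_dim, embed_dim), dtype="float32")

out = F.fused_multi_head_attention(x, qkv_weight, linear_weight)
print(out.shape)  # [2, 4, 128]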