Unverified commit 223fb7b3, authored by Yiqun Liu, committed by GitHub

Fix code example of fused_attention and fused_feedforward. (#43635)

Parent: 4aac90ef
@@ -386,12 +386,14 @@ class FusedAttentionOpMaker : public framework::OpProtoAndCheckerMaker {
         .SetDefault(-1);
     AddComment(R"DOC(
-  Add fused attention op whose logic is as follows:
-  // @input: [batch_size, seq_len, 3, num_head, head_dim]
+  The fused_attention operator is the same as following pseudo codes:
+
+  // @input: [batch_size, seq_len, embed_dim]
   // @final_out: [batch_size, seq_len, num_heads, head_dim]
+  residual = input
   if (pre_layernorm)
-    out = layer_norm(input);
-  out = compute_qkv(out) + bias;
+    query = layer_norm(input);
+  out = compute_qkv(query) + qkv_bias;
   // fmha module
   {
     out = transpose(out, perm=[2, 0, 3, 1, 4]);
@@ -403,11 +405,14 @@ class FusedAttentionOpMaker : public framework::OpProtoAndCheckerMaker {
     out = transpose(out, perm=[0, 2, 1, 3]);
   }
-  out = out_linear(out);
-  if (pre_layernorm)
-    final_out = residual + dropout(bias + out);
-  else
-    final_out = layer_norm(residual + dropout(bias + out));
+  // out linear
+  out = linear(out);
+  if add_residual:
+    out = residual + dropout(out);
+  else:
+    out = dropout(out);
+  if (!pre_layernorm)
+    out = layer_norm(out);
 )DOC");
   }
 };
...
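For reference, the following is a rough, unfused Paddle sketch of the computation the updated fused_attention pseudo code describes, taking the pre_layernorm and add_residual branches. The weight layouts, the num_heads/head_dim split, and the zero dropout probabilities are assumptions made for illustration only; they do not reflect the fused operator's actual parameter layout or kernel.

# Illustrative only: an unfused sketch of the pseudo code above
# (pre_layernorm=True, add_residual=True). Shapes and helper names such as
# num_heads/head_dim are assumptions, not part of the operator's API.
import paddle
import paddle.nn.functional as F

batch_size, seq_len, embed_dim, num_heads = 2, 4, 16, 2
head_dim = embed_dim // num_heads

x = paddle.randn([batch_size, seq_len, embed_dim])      # @input
qkv_weight = paddle.randn([embed_dim, 3 * embed_dim])   # assumed fused QKV projection layout
qkv_bias = paddle.zeros([3 * embed_dim])
out_weight = paddle.randn([embed_dim, embed_dim])

residual = x
query = F.layer_norm(x, normalized_shape=[embed_dim])   # pre_layernorm branch
qkv = paddle.matmul(query, qkv_weight) + qkv_bias       # compute_qkv(query) + qkv_bias
qkv = qkv.reshape([batch_size, seq_len, 3, num_heads, head_dim])
qkv = paddle.transpose(qkv, perm=[2, 0, 3, 1, 4])       # [3, bsz, num_heads, seq_len, head_dim]
q, k, v = qkv[0], qkv[1], qkv[2]

attn = paddle.matmul(q * head_dim ** -0.5, k, transpose_y=True)
attn = F.softmax(attn, axis=-1)
out = paddle.matmul(F.dropout(attn, p=0.0), v)
out = paddle.transpose(out, perm=[0, 2, 1, 3]).reshape([batch_size, seq_len, embed_dim])

out = paddle.matmul(out, out_weight)                    # out linear
out = residual + F.dropout(out, p=0.0)                  # add_residual branch
print(out.shape)                                        # [2, 4, 16]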
@@ -198,16 +198,24 @@ class FusedFeedForwardOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<int>("ring_id", "ring id for tensor model parallel.")
         .SetDefault(-1);
     AddComment(R"DOC(
-  the function of fused_feedforward operator is the same as the following pseudo code:
+  The fused_feedforward operator is the same as the following pseudo codes:
+
   residual = src;
-  ln1_out = src;
-  if(pre_layer_norm){
+  if (pre_layer_norm)
     ln1_out = layer_norm(src);
-  }
-  out = linear(dropout(activation(dropout(linear(ln1_out)))));
-  if(!pre_layer_norm) {
+  else
+    ln1_out = src;
+  // linear 1
+  out = linear(ln1_out);
+  out = dropout(activation(out));
+  // linear 2
+  out = linear(out);
+  if (add_residual)
+    out = residual + dropout(out);
+  else
+    out = dropout(out);
+  if (!pre_layer_norm)
     out = layer_norm(out);
-  }
 )DOC");
   }
 };
...
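Likewise, a minimal unfused Paddle sketch of the feedforward pseudo code above, assuming pre_layer_norm=True, add_residual=True, a relu activation and zero dropout; the sizes d_model/d_ffn are arbitrary values chosen for the example.

# Illustrative only: an unfused sketch of the fused_feedforward pseudo code.
import paddle
import paddle.nn.functional as F

batch_size, seq_len, d_model, d_ffn = 2, 4, 16, 32
src = paddle.randn([batch_size, seq_len, d_model])
w1 = paddle.randn([d_model, d_ffn])   # linear 1
w2 = paddle.randn([d_ffn, d_model])   # linear 2

residual = src
ln1_out = F.layer_norm(src, normalized_shape=[d_model])   # pre_layer_norm branch
out = paddle.matmul(ln1_out, w1)                          # linear 1
out = F.dropout(F.relu(out), p=0.0)                       # dropout(activation(out))
out = paddle.matmul(out, w2)                              # linear 2
out = residual + F.dropout(out, p=0.0)                    # add_residual branch
print(out.shape)                                          # [2, 4, 16]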
@@ -55,12 +55,19 @@ def fused_feedforward(x,
     .. code-block:: python

-        residual = src;
+        residual = x
         if pre_layer_norm:
-            src = layer_norm(src)
-        src = linear(dropout(activation(dropout(linear(src)))))
+            out = layer_norm1(x)
+        else:
+            out = x
+        out = linear2(dropout1(activation(linear1(src))))
+        if add_residual:
+            out = residual + dropout2(out)
+        else:
+            out = dropout2(out)
         if not pre_layer_norm:
-            src = layer_norm(out)
+            out = layer_norm2(out)

     Args:
         x (Tensor): the input tensor could be 3-D tensor, the input data type could be float16, float32 or float64, the shape is`[batch\_size, sequence\_length, d_model]`.
@@ -102,15 +109,13 @@ def fused_feedforward(x,
            # required: gpu
            import paddle
-           import numpy as np
-           x_data = np.random.random((1, 8, 8)).astype("float32")
-           linear1_weight_data = np.random.random((8, 8)).astype("float32")
-           linear2_weight_data = np.random.random((8, 8)).astype("float32")
-           x = paddle.to_tensor(x_data)
-           linear1_weight = paddle.to_tensor(linear1_weight_data)
-           linear2_weight = paddle.to_tensor(linear2_weight_data)
-           out = paddle.incubate.nn.functional.fused_feedforward(x, linear1_weight, linear2_weight)
-           print(out.numpy().shape)
+           import paddle.incubate.nn.functional as F
+
+           x = paddle.randn(shape=(1, 8, 8), dtype="float32")
+           linear1_weight = paddle.randn(shape=(8, 8), dtype="float32")
+           linear2_weight = paddle.randn(shape=(8, 8), dtype="float32")
+           out = F.fused_feedforward(x, linear1_weight, linear2_weight)
+           print(out.shape)
            # (1, 8, 8)
    """
    _verify_dropout_rate(dropout1_rate)
@@ -392,27 +397,34 @@ def fused_multi_head_attention(x,
     .. code-block:: python

+        residual = x
         if pre_layer_norm:
             out = layer_norm(x)
-            out = linear(out) + qkv) + bias
         else:
-            out = linear(x) + bias
+            out = x
+        # compute q, k, v
+        out = matmul(out, qkv_weight) + qkv_bias
         out = transpose(out, perm=[2, 0, 3, 1, 4])
-        # extract q, k and v from out.
-        q = out[0:1,::]
+        # extract q, k and v from out
+        q = out[0:1,::] * (head_dim ** -0.5)
         k = out[1:2,::]
         v = out[2:3,::]
-        out = q * k^t
-        out = attn_mask + out
+        out = matmul(q, k, transpose_y=True)
+        out = out + attn_mask
         out = softmax(out)
         out = dropout(out)
-        out = out * v
+        out = matmul(out, v)
+        # combine heads
         out = transpose(out, perm=[0, 2, 1, 3])
-        out = out_linear(out)
-        if pre_layer_norm:
-            out = x + dropout(linear_bias + out)
-        else:
-            out = layer_norm(x + dropout(linear_bias + out))
+        # project to output
+        out = linear(out)
+        if add_residual:
+            out = residual + dropout(out)
+        else:
+            out = dropout(out)
+        if not pre_layer_norm:
+            out = layer_norm(out)

     Parameters:
         x (Tensor): The input tensor of fused_multi_head_attention. The shape is
...
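For completeness, here is a usage sketch of the Python API in the same style as the fused_feedforward example above. It assumes the first three positional parameters of paddle.incubate.nn.functional.fused_multi_head_attention are x, qkv_weight and linear_weight, and that qkv_weight is laid out as [3, num_heads, head_dim, embed_dim] as the docstring describes; verify against the current signature before relying on it.

# required: gpu
# A minimal usage sketch; argument order and weight layout are assumptions.
import paddle
import paddle.incubate.nn.functional as F

# x: [batch_size, seq_len, embed_dim]
x = paddle.randn(shape=(2, 4, 128), dtype="float32")
# qkv_weight: assumed layout [3, num_heads, head_dim, embed_dim]
qkv_weight = paddle.randn(shape=(3, 4, 32, 128), dtype="float32")
# linear_weight: [embed_dim, embed_dim]
linear_weight = paddle.randn(shape=(128, 128), dtype="float32")

out = F.fused_multi_head_attention(x, qkv_weight, linear_weight)
print(out.shape)
# [2, 4, 128]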