diff --git a/paddle/fluid/operators/multihead_matmul_op.cc b/paddle/fluid/operators/multihead_matmul_op.cc
index b612be02b4f50ff1d50c6d8a3e1e0c5c1e9f61c6..fbf372ba6e15aca7b849a8696ac5551dc383ee51 100644
--- a/paddle/fluid/operators/multihead_matmul_op.cc
+++ b/paddle/fluid/operators/multihead_matmul_op.cc
@@ -134,7 +134,7 @@ MultiHeadMatMul Operator.
 This op is used for optimize multi head calculation in ernie model.
 Not suggest to use in other case except has same structure as ernie.
 
-Example of matrix multiplication with head_number of H
+Example of matrix multiplication with head_number of B
 - X: [B, M, K], Y: [B, K, N] => Out: [B, M, N]
 
 Both the input `Q` and `K` can carry the LoD (Level of Details) information,
diff --git a/paddle/fluid/operators/multihead_matmul_op.cu b/paddle/fluid/operators/multihead_matmul_op.cu
index 6e8aa712fbf00355b83bde5313ba0d04724e2ffb..b0b347123047ac8f9583f5188d52e4177392e27e 100644
--- a/paddle/fluid/operators/multihead_matmul_op.cu
+++ b/paddle/fluid/operators/multihead_matmul_op.cu
@@ -331,7 +331,7 @@ void MultiHeadGPUCompute(const platform::CUDADeviceContext &dev_ctx,
   auto stream = dev_ctx.stream();
 
   int grid = m;
-  PADDLE_ENFORCE_LT(k, 1024,
+  PADDLE_ENFORCE_LE(k, 1024,
                     "Input head_number * size_per_head should <= 1024");
   int block = k <= 1024 ? k : 1024;
   add_QKV<T><<<grid, block, 0, stream>>>(Q, K, V, q_buf, k_buf, v_buf, bias_q,