From 974f8f321b4dff97073944e0b5aab6118ceca610 Mon Sep 17 00:00:00 2001
From: feng_shuai
Date: Tue, 1 Nov 2022 12:59:55 +0800
Subject: [PATCH] fix:add no support for cuda_arch<700 (#47509)

---
 paddle/fluid/operators/math/bert_encoder_functor.cu | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/paddle/fluid/operators/math/bert_encoder_functor.cu b/paddle/fluid/operators/math/bert_encoder_functor.cu
index 18e5ee845d..5b11eee61a 100644
--- a/paddle/fluid/operators/math/bert_encoder_functor.cu
+++ b/paddle/fluid/operators/math/bert_encoder_functor.cu
@@ -836,7 +836,13 @@ inline void MatMulWithHeadQK(const phi::GPUContext &context,
                                              FINAL_MASK);
   } else {
     if (bias_is_mask) {
-#ifndef __HIPCC__
+#if defined(__HIPCC__) || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700)
+      PADDLE_ENFORCE_EQ(bias_is_mask,
+                        false,
+                        platform::errors::InvalidArgument(
+                            "QK_bias as mask is not supported on ROCm or "
+                            "when cuda_arch < 700"));
+#else
       constexpr int ITEMS_PER_THREAD = 1;
       bool is_half2 = true;
 
@@ -853,11 +859,6 @@ inline void MatMulWithHeadQK(const phi::GPUContext &context,
                                              batch_size,
                                              head_num,
                                              seq_len);
-#else
-      PADDLE_ENFORCE_EQ(bias_is_mask,
-                        false,
-                        platform::errors::InvalidArgument(
-                            "rocm can't support that QK_bias is mask"));
 #endif
     } else {
       SoftmaxKernelWithEltadd2<__half2><<<grid, block, 0, stream>>>(
-- 
GitLab
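
For readers outside the Paddle tree, here is a minimal, self-contained sketch of the preprocessor-guard pattern the patch introduces: one branch compiled for HIP builds (__HIPCC__) or for CUDA device targets older than sm_70, and a fast path otherwise. This is illustrative only, not code from bert_encoder_functor.cu; the kernel name and sentinel values are made up, and the patch itself rejects the configuration at runtime via PADDLE_ENFORCE_EQ rather than writing a sentinel.

// guard_sketch.cu -- illustrative only, not part of the patch.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void guarded_kernel(float *out) {
#if defined(__HIPCC__) || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700)
  // Fallback path: __CUDA_ARCH__ is defined only during the device-code
  // compilation pass, so this branch is selected for ROCm builds or for GPU
  // targets below sm_70 (pre-Volta), mirroring the condition the patch adds.
  out[threadIdx.x] = 0.0f;
#else
  // Fast path compiled only for sm_70+ CUDA targets.
  out[threadIdx.x] = 1.0f;
#endif
}

int main() {
  const int n = 32;
  float *d_out = nullptr;
  cudaMalloc(&d_out, n * sizeof(float));
  guarded_kernel<<<1, n>>>(d_out);

  float h_out[n];
  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  printf("lane 0 -> %.1f (1.0 means the fast path was compiled)\n", h_out[0]);

  cudaFree(d_out);
  return 0;
}

Compiling the sketch with, for example, "nvcc -arch=sm_60 guard_sketch.cu" versus "-arch=sm_80" shows the two branches being selected, which is the same mechanism the patch relies on to keep the bias-as-mask softmax path off ROCm and pre-Volta GPUs.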