set(kernel_declare_file
    ${PADDLE_BINARY_DIR}/paddle/phi/kernels/declarations.h.tmp
    CACHE INTERNAL "declarations.h file")
set(kernel_declare_file_final
    ${PADDLE_BINARY_DIR}/paddle/phi/kernels/declarations.h)
file(
  WRITE ${kernel_declare_file}
  "// Generated by the paddle/phi/kernels/CMakeLists.txt.  DO NOT EDIT!\n\n#pragma once\n\n"
)
file(APPEND ${kernel_declare_file}
     "#include \"paddle/phi/core/kernel_registry.h\"\n\n")
set(kernel_declare_file_prune
    ${PADDLE_BINARY_DIR}/paddle/phi/kernels/declarations.h.prune
    CACHE INTERNAL "declarations.h file")
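# The pruned variant is used by prune_declaration_h(), which is called at the
# bottom of this file when a KERNEL_LIST is specified, to trim declarations.h
# for selective kernel builds.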

# phi functors and functions called by kernels
add_subdirectory(funcs)

# kernel autotune
add_subdirectory(autotune)

# phi depends on all phi kernel targets
set_property(GLOBAL PROPERTY PHI_KERNELS "")
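# The property is populated at the end of this file with the kernel library
# targets that were actually built (phi_cpu, phi_gpu or phi_xpu).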

# [ 1. Common kernel compilation dependencies ]
set(COMMON_KERNEL_DEPS
    dense_tensor
    string_tensor
    sparse_coo_tensor
    sparse_csr_tensor
    tensor_array
    int_array
    scalar
    kernel_context
    kernel_factory
    arg_map_context
    convert_utils
    lod_utils
    custom_kernel
    string_infermeta
    phi_tensor_utils)
set(COMMON_KERNEL_DEPS
    ${COMMON_KERNEL_DEPS}
    eigen_function
    blas
    math_function
    im2col
    vol2col
    concat_and_split_functor
    selected_rows_functor)
# remove this dep after removing fluid deps on tensor creation
set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} lod_utils)
set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} infermeta infermeta_utils
                       sparse_infermeta)
set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} switch_autotune)

set(COMMON_KERNEL_DEPS
    ${COMMON_KERNEL_DEPS}
    threadpool
    jit_kernel_helper
    softmax
    cross_entropy
    matrix_bit_code
    lapack_function
    lstm_compute
    gru_compute
    deformable_conv_functor
    matrix_reduce
    segment_pooling
    pooling
    maxouting
    matrix_inverse
    matrix_solve
    phi_dynload_warpctc
    phi_dynload_warprnnt
    sequence_padding
    sequence_scale
    fft
    phi_data_layout_transform
    gpc
    utf8proc
    gather_scatter_functor)

set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} process_group)

if(WITH_FLASHATTN)
  set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} phi_dynload_flashattn)
endif()

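# Communication contexts are only linked in when the corresponding
# distributed backends are enabled.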
if(WITH_NCCL OR WITH_RCCL)
  set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} nccl_comm_context)
endif()
if(WITH_GLOO)
  set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} gloo_comm_context)
endif()
if(WITH_CUDNN_FRONTEND)
  set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} cudnn-frontend)
endif()
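# Publish the generated declarations header; the copy only happens when the
# contents changed, so dependents are not rebuilt needlessly.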
copy_if_different(${kernel_declare_file} ${kernel_declare_file_final})

file(GLOB kernel_h "*.h" "selected_rows/*.h" "sparse/*.h" "strings/*.h")
file(GLOB kernel_impl_h "impl/*.h" "selected_rows/impl/*.h")
file(GLOB kernel_primitive_h "primitive/*.h")

# fusion op kernels are also collected here
file(
  GLOB
  kernel_cu
  "gpu/*.cu"
  "gpu/*.cu.cc"
  "gpudnn/*.cu"
  "kps/*.cu"
  "selected_rows/gpu/*.cu"
  "sparse/gpu/*.cu"
  "strings/gpu/*.cu"
  "fusion/gpu/*.cu")

if(WITH_CUTLASS)
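  # Generate the CUTLASS conv2d fusion kernels at configure time; the emitted
  # .cu files land in fusion/cutlass/conv2d/generated and are picked up by the
  # glob below.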
  execute_process(
    COMMAND ${CMAKE_COMMAND} -E make_directory
            "${CMAKE_CURRENT_SOURCE_DIR}/fusion/cutlass/conv2d/generated"
    COMMAND ${PYTHON_EXECUTABLE} "conv2d_bias_act.py"
    COMMAND ${PYTHON_EXECUTABLE} "conv2d_bias_residual.py"
    WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/fusion/cutlass/conv2d")

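  # generate_kernels.py instantiates the memory-efficient attention kernels
  # for the architectures given in NVCC_ARCH_BIN; its output is globbed below
  # from fusion/cutlass/memory_efficient_attention/autogen/impl.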
  execute_process(
    COMMAND
      ${PYTHON_EXECUTABLE}
      ${PADDLE_SOURCE_DIR}/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/generate_kernels.py
      --cuda_arch "${NVCC_ARCH_BIN}"
    RESULT_VARIABLE memory_efficient_attention_gen_res)

  if(NOT memory_efficient_attention_gen_res EQUAL 0)
    message(
      FATAL_ERROR
        "The memory efficient attention kernel generation errors with NVCC_ARCH_BIN=${NVCC_ARCH_BIN}"
    )
  endif()

  file(GLOB cutlass_cu "fusion/cutlass/conv2d/generated/*.cu"
       "fusion/cutlass/conv2d/*.cu" "fusion/cutlass/*.cu"
       "fusion/cutlass/memory_efficient_attention/autogen/impl/*.cu")
  add_definitions("-DPADDLE_WITH_MEMORY_EFFICIENT_ATTENTION")
  list(APPEND kernel_cu ${cutlass_cu})
endif()

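# With oneDNN enabled, the onednn/ and fusion/onednn/ kernels are compiled
# alongside the common CPU sources; otherwise only the plain CPU sources are
# collected.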
if(WITH_MKLDNN)
  file(
    GLOB
    kernel_cc
    "*.cc"
    "cpu/*.cc"
    "selected_rows/*.cc"
    "selected_rows/cpu/*.cc"
    "sparse/*.cc"
    "sparse/cpu/*.cc"
    "strings/*.cc"
    "strings/cpu/*.cc"
    "onednn/*.cc"
    "fusion/*.cc"
    "fusion/onednn/*.cc"
    "fusion/cpu/*.cc")
else()
  file(
    GLOB
    kernel_cc
    "*.cc"
    "cpu/*.cc"
    "selected_rows/*.cc"
    "selected_rows/cpu/*.cc"
    "sparse/*.cc"
    "sparse/cpu/*.cc"
    "strings/*.cc"
    "strings/cpu/*.cc"
    "fusion/*.cc"
    "fusion/cpu/*.cc")
endif()

file(GLOB kernel_xpu "xpu/*.cc" "selected_rows/xpu/*.cc" "fusion/xpu/*.cc")

if(WITH_MKLDNN)
  set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} get_kerneltype_forvar_utils)
endif()

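# [ 2. Build the kernel library for the enabled backend ]
# Each branch compiles the common .cc kernels together with the
# backend-specific sources and registers their kernel declarations.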
if(WITH_GPU OR WITH_ROCM)
  if(WITH_GPU)
    add_library(phi_gpu ${kernel_cu} ${kernel_cc})
    if(WITH_CUTLASS)
      add_dependencies(phi_gpu cutlass_codegen)
    endif()
  elseif(WITH_ROCM)
    hip_add_library(phi_gpu STATIC ${kernel_cu} ${kernel_cc})
  endif()
  kernel_declare("${kernel_cu}")
  kernel_declare("${kernel_cc}")
  target_link_libraries(phi_gpu ${COMMON_KERNEL_DEPS})
  set(ADD_PHI_KERNELS ${ADD_PHI_KERNELS} phi_gpu)
elseif(WITH_XPU)
  if(WITH_XPU_KP)
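    # The KP kernels share the GPU KPS sources: copy them into the build tree
    # and rename *.cu to *.kps so the XPU kernel compiler picks them up.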
    file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/kps/
         DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/kps/)
    file(GLOB kernel_xpu_kps "${CMAKE_CURRENT_BINARY_DIR}/kps/*.cu")
    foreach(kernel ${kernel_xpu_kps})
      get_filename_component(name ${kernel} NAME_WE)
      file(RENAME ${kernel} "${CMAKE_CURRENT_BINARY_DIR}/kps/${name}.kps")
    endforeach()
    file(GLOB kernel_xpu_kps "${CMAKE_CURRENT_BINARY_DIR}/kps/*.kps")
    xpu_add_library(
      phi_xpu
      STATIC
      ${kernel_xpu}
      ${kernel_xpu_kps}
      ${kernel_cc}
      DEPENDS
      ${COMMON_KERNEL_DEPS})
  else()
    add_library(phi_xpu ${kernel_xpu} ${kernel_cc})
  endif()
  kernel_declare("${kernel_xpu}")
  kernel_declare("${kernel_xpu_kps}")
  kernel_declare("${kernel_cc}")
  target_link_libraries(phi_xpu ${COMMON_KERNEL_DEPS})
  set(ADD_PHI_KERNELS ${ADD_PHI_KERNELS} phi_xpu)
else()
  add_library(phi_cpu ${kernel_cc})
  target_link_libraries(phi_cpu ${COMMON_KERNEL_DEPS})
  kernel_declare("${kernel_cc}")
  set(ADD_PHI_KERNELS phi_cpu)
endif()

set_property(GLOBAL PROPERTY PHI_KERNELS ${ADD_PHI_KERNELS})

if(NOT "${KERNEL_LIST}" STREQUAL "")
  prune_declaration_h()
endif()