include(operators)
if(WITH_UNITY_BUILD)
    # Load Unity Build rules for operators in paddle/fluid/operators/fused.
    include(unity_build_rule.cmake)
endif()
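# Operators listed in EXCLUDES are not registered automatically; most of them
# are registered manually below, guarded by platform and cuDNN version checks.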
register_operators(EXCLUDES
    fused_bn_activation_op
    conv_fusion_op
    fusion_transpose_flatten_concat_op
    fusion_conv_inception_op
    fused_fc_elementwise_layernorm_op
    multihead_matmul_op
    skip_layernorm_op
    fused_embedding_eltwise_layernorm_op
    fusion_group_op
    fusion_gru_op
    fusion_lstm_op
    fused_bn_add_activation_op
    fused_attention_op
    fused_transformer_op
    fused_feedforward_op
    resnet_unit_op)

# fusion_gru_op and fusion_lstm_op do not have CUDA kernels
op_library(fusion_gru_op)
op_library(fusion_lstm_op)
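# Appending USE_CPU_ONLY_OP(...) to ${pybind_file} makes the generated Python
# binding pull in the registration of these CPU-only operators.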
file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(fusion_gru);\n")
file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(fusion_lstm);\n")

if (WITH_GPU OR WITH_ROCM)
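    # The fused operators below are GPU-only; several are additionally guarded
    # by cuDNN version checks or disabled on HIP when MIOpen lacks the primitive.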
    # fused_bn_activation_op needs cuDNN 7.4.1 or above
    # HIP does not support the bn + act fusion in MIOPEN
    if ((NOT WITH_ROCM) AND (NOT ${CUDNN_VERSION} VERSION_LESS 7401))
        op_library(fused_bn_activation_op)
        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_batch_norm_act);\n")
    endif()
    # conv_fusion_op needs cuDNN 7 or above
    if (NOT ${CUDNN_VERSION} VERSION_LESS 7100)
        op_library(conv_fusion_op)
        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_fusion);\n")
    endif()
    # fusion_transpose_flatten_concat_op
    # HIP does not support cudnnTransformTensor
    if(NOT WITH_ROCM)
        op_library(fusion_transpose_flatten_concat_op)
        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fusion_transpose_flatten_concat);\n")
    endif()
    # fusion_conv_inception_op needs cuDNN 7 or above
    # HIP does not support cudnnConvolutionBiasActivationForward
    if ((NOT WITH_ROCM) AND (NOT ${CUDNN_VERSION} VERSION_LESS 7100))
        op_library(fusion_conv_inception_op)
        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_inception_fusion);\n")
    endif()
    # fused_fc_elementwise_layernorm_op
    op_library(fused_fc_elementwise_layernorm_op)
    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_fc_elementwise_layernorm);\n")
    # multihead_matmul_op
    op_library(multihead_matmul_op)
    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(multihead_matmul);\n")
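    # skip_layernorm_op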
    op_library(skip_layernorm_op)
    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(skip_layernorm);\n")
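    # fused_embedding_eltwise_layernorm_op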
    op_library(fused_embedding_eltwise_layernorm_op)
    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_embedding_eltwise_layernorm);\n")
    # fusion_group
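    # fusion_group_op compiles generated device code at runtime, hence the
    # device_code dependency; it is not built on macOS or Windows.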
    if(NOT APPLE AND NOT WIN32)
        op_library(fusion_group_op DEPS device_code)
        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fusion_group);\n")
        cc_test(test_fusion_group_op SRCS fusion_group_op_test.cc DEPS fusion_group_op)
    endif()
    # fused_bn_add_activation_op needs cuDNN 7.4.1 or above
    # HIP does not support the bn + act fusion in MIOPEN
    if ((NOT WITH_ROCM) AND (NOT ${CUDNN_VERSION} VERSION_LESS 7401))
        op_library(fused_bn_add_activation_op)
        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_bn_add_activation);\n")
    endif()
    # fused_dropout
    # only supported with CUDA (not on ROCm)
    if(NOT WITH_ROCM)
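        # standalone CUDA unit tests for the fused dropout kernels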
        nv_test(test_fused_residual_dropout_bias SRCS fused_residual_dropout_bias_test.cu DEPS tensor op_registry dropout_op layer_norm_op device_context generator memory)
        nv_test(test_fused_dropout_act_bias SRCS fused_dropout_act_bias_test.cu DEPS tensor op_registry dropout_op layer_norm_op device_context generator memory)
        nv_test(test_fused_layernorm_residual_dropout_bias SRCS fused_layernorm_residual_dropout_bias_test.cu DEPS tensor op_registry dropout_op layer_norm_op device_context generator memory)

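        # fused_feedforward_op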
        op_library(fused_feedforward_op)
        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_feedforward);\n")

        # fused_attention_op
        op_library(fused_attention_op)
        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_attention);\n")
    endif()
    # resnet_unit_op needs cuDNN 8.0 or above
    if ((NOT WITH_ROCM) AND (NOT ${CUDNN_VERSION} VERSION_LESS 8000))
        op_library(resnet_unit_op)
        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(resnet_unit);\n")
        cc_test(test_cudnn_norm_conv SRCS cudnn_norm_conv_test.cc DEPS conv_op blas im2col vol2col depthwise_conv eigen_function tensor op_registry device_context generator memory)
        cc_test(test_cudnn_bn_add_relu SRCS cudnn_bn_add_relu_test.cc DEPS batch_norm_op fused_bn_add_activation_op tensor op_registry device_context generator memory)
    endif()
endif()