include(operators)

# Clean cached op lists and regenerate the pybind header from scratch on every
# configure, so stale operator registrations never survive a rebuild.
unset(GLOB_OP_LIB CACHE)
unset(OP_LIBRARY CACHE)
set(pybind_file ${PADDLE_BINARY_DIR}/paddle/fluid/pybind/pybind.h CACHE INTERNAL "pybind.h file")
file(WRITE ${pybind_file} "// Generated by the paddle/fluid/operator/CMakeLists.txt.  DO NOT EDIT!\n\n")
# Operator sub-directories, one per operator category.
add_subdirectory(math)
add_subdirectory(controlflow)
add_subdirectory(detection)
add_subdirectory(elementwise)
add_subdirectory(fused)
add_subdirectory(metrics)
add_subdirectory(ngraph)
add_subdirectory(optimizers)
add_subdirectory(reduce_ops)
add_subdirectory(sequence_ops)
add_subdirectory(jit)
# Distributed-training operators are only built when WITH_DISTRIBUTE is on.
if(WITH_DISTRIBUTE)
    add_subdirectory(distributed)
    add_subdirectory(distributed_ops)
    add_subdirectory(collective)
endif()

add_subdirectory(reader)
# NCCL ops are not supported on Windows.
if (NOT WIN32)
    add_subdirectory(nccl)
endif()

if (WITH_GPU AND TENSORRT_FOUND)
    add_subdirectory(tensorrt)
endif()
if (WITH_LITE)
    add_subdirectory(lite)
endif()

# Common header-level dependencies shared by all operator targets.
SET(OP_HEADER_DEPS xxhash executor)
if (WITH_GPU)
    SET(OP_HEADER_DEPS ${OP_HEADER_DEPS} cub)
endif()

# parameter_prefetch is only needed for distributed parameter-server lookups.
SET(OP_PREFETCH_DEPS "")
if (WITH_DISTRIBUTE)
    SET(OP_PREFETCH_DEPS ${OP_PREFETCH_DEPS} parameter_prefetch)
endif()

# Ops that require MKL+AVX; when those are unavailable they are excluded from
# the generic registration below (their sub-dirs handle them, if at all).
SET(OP_MKL_DEPS "")
if (NOT WITH_MKL OR NOT WITH_AVX)
    SET(OP_MKL_DEPS ${OP_MKL_DEPS} match_matrix_tensor_op)
    SET(OP_MKL_DEPS ${OP_MKL_DEPS} var_conv_2d_op)
endif()
if(WITH_COVERAGE OR NOT WITH_AVX OR WIN32)
    SET(OP_MKL_DEPS ${OP_MKL_DEPS} pyramid_hash_op)
endif()

# Register every op in this directory except the ones that need special
# handling (built explicitly further below).
register_operators(EXCLUDES py_func_op warpctc_op dgc_op
    sync_batch_norm_op ${OP_MKL_DEPS} DEPS ${OP_HEADER_DEPS} ${OP_PREFETCH_DEPS})
if (WITH_GPU)
    # warpctc_op needs cudnn 7 above; on older cudnn, exclude the .cu kernel.
    if (${CUDNN_MAJOR_VERSION} VERSION_LESS 7)
        op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale SRCS warpctc_op.cc warpctc_op.cu.cc)
    else()
        op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale)
    endif()
    # sync_batch_norm requires NCCL, which is unavailable on Windows.
    if (NOT WIN32)
        if (WITH_NCCL)
            op_library(sync_batch_norm_op)
            file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(sync_batch_norm);\n")
        endif()
    endif()
else()
    op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale)
endif()
set(COMMON_OP_DEPS ${OP_HEADER_DEPS})

# Deep Gradient Compression op (GPU-only registration).
if (WITH_DGC)
    op_library(dgc_op DEPS dgc)
    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(dgc);\n")
    set(COMMON_OP_DEPS ${COMMON_OP_DEPS} dgc)
endif()

# Accumulate the math/functor libraries every operator may link against.
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} selected_rows_functor selected_rows lod_tensor maxouting unpooling pooling lod_rank_table context_project sequence_pooling executor device_memory_aligment)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} dynload_warpctc)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} sequence_padding sequence_scale cos_sim_functor memory jit_kernel_helper concat_and_split cross_entropy softmax vol2col im2col sampler sample_prob tree2col)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} sequence2batch lstm_compute matrix_bit_code gru_compute activation_functions beam_search fc)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} box_wrapper)
if (WITH_GPU)
  set(COMMON_OP_DEPS ${COMMON_OP_DEPS} depthwise_conv prelu)
endif()
# NOTE(review): a second `device_memory_aligment` append used to follow here;
# it was redundant (already added in the first list above) and was removed.
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} layer)
# FIXME(typhoonzero): operator deps may not needed.
# op_library(lod_tensor_to_array_op DEPS lod_rank_table_op)
# op_library(array_to_lod_tensor_op DEPS lod_rank_table_op)
# op_library(unsqueeze_op DEPS reshape_op)
# op_library(squeeze_op DEPS reshape_op)
# op_library(flatten_op DEPS reshape_op)
# op_library(unstack_op DEPS stack_op)
# op_library(tensor_array_to_tensor_op DEPS concat_op)

# Publish the accumulated dependency list for sibling CMake files.
set(OPERATOR_DEPS ${OPERATOR_DEPS} ${COMMON_OP_DEPS})
set(GLOB_OPERATOR_DEPS ${OPERATOR_DEPS} CACHE INTERNAL "Global Op dependencies")
# C++ unit tests for individual operators and helpers.
cc_test(assign_op_test SRCS assign_op_test.cc DEPS assign_op)
cc_test(gather_test SRCS gather_test.cc DEPS tensor)
cc_test(scatter_test SRCS scatter_test.cc DEPS tensor math_function)
cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_tensor)
cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor memory)
cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op)
cc_test(save_load_combine_op_test SRCS save_load_combine_op_test.cc DEPS save_combine_op load_combine_op)
nv_test(dropout_op_test SRCS dropout_op_test.cc DEPS dropout_op tensor)
# The grad-grad functor test has a CUDA variant when building with GPU support.
if (WITH_GPU)
    nv_test(test_leaky_relu_grad_grad_functor SRCS test_leaky_relu_grad_grad_functor.cc test_leaky_relu_grad_grad_functor.cu DEPS tensor device_context eigen3)
else()
    cc_test(test_leaky_relu_grad_grad_functor SRCS test_leaky_relu_grad_grad_functor.cc DEPS tensor device_context eigen3)
endif()
# py_func_op embeds the Python interpreter, so it is only built with Python.
if (WITH_PYTHON)
  cc_library(py_func_op SRCS py_func_op.cc DEPS op_registry python pybind)
endif()

# Publish the full op library list for the top-level build.
set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library")
add_subdirectory(benchmark)

cc_test(op_debug_string_test SRCS op_debug_string_test.cc DEPS elementwise_add_op)