include(operators)

# Clean the cache and the pybind_file content first when rebuilding.
unset(GLOB_OP_LIB CACHE)
unset(OP_LIBRARY CACHE)
set(pybind_file ${PADDLE_BINARY_DIR}/paddle/fluid/pybind/pybind.h CACHE INTERNAL "pybind.h file")
file(WRITE ${pybind_file} "// Generated by the paddle/fluid/operator/CMakeLists.txt.  DO NOT EDIT!\n\n")

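# Group operators and their helper libraries by subdirectory.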
add_subdirectory(math)
add_subdirectory(controlflow)
add_subdirectory(detection)
add_subdirectory(elementwise)
add_subdirectory(fused)
add_subdirectory(metrics)
add_subdirectory(ngraph)
add_subdirectory(optimizers)
add_subdirectory(reduce_ops)
add_subdirectory(sequence_ops)
add_subdirectory(jit)

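# Distributed and collective operators are only built when distributed support is enabled.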
if(WITH_DISTRIBUTE)
    add_subdirectory(distributed)
    add_subdirectory(distributed_ops)
    add_subdirectory(collective)
endif()

add_subdirectory(reader)

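# NCCL-based operators are skipped on Windows, where NCCL is not supported.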
if (NOT WIN32)
    add_subdirectory(nccl)
endif()

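# TensorRT operators require both a GPU build and a detected TensorRT installation.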
if (WITH_GPU AND TENSORRT_FOUND)
    add_subdirectory(tensorrt)
endif()

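# Optional inference engine integrations (anakin subgraph and lite).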
if (ANAKIN_SUBGRAPH) 
    add_subdirectory(anakin)
endif()

if (WITH_LITE)
    add_subdirectory(lite)
endif()

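# Base dependencies passed to every operator registered by register_operators below.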
SET(OP_HEADER_DEPS xxhash executor)

if (WITH_GPU)
    SET(OP_HEADER_DEPS ${OP_HEADER_DEPS} cub)
endif()

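# parameter_prefetch is only required for distributed builds.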
SET(OP_PREFETCH_DEPS "")
if (WITH_DISTRIBUTE)
    SET(OP_PREFETCH_DEPS ${OP_PREFETCH_DEPS} parameter_prefetch)
endif()

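# Operators that rely on MKL/AVX are excluded from the default registration
# when those features are unavailable (or for coverage and Windows builds).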
SET(OP_MKL_DEPS "")
if (NOT WITH_MKL OR NOT WITH_AVX)
    SET(OP_MKL_DEPS ${OP_MKL_DEPS} match_matrix_tensor_op)
    SET(OP_MKL_DEPS ${OP_MKL_DEPS} var_conv_2d_op)
endif()
if(WITH_COVERAGE OR NOT WITH_AVX OR WIN32)
    SET(OP_MKL_DEPS ${OP_MKL_DEPS} pyramid_hash_op)
endif()

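# Register all operators in this directory except those that need special
# handling; the excluded ones are added individually below.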
register_operators(EXCLUDES py_func_op warpctc_op dgc_op
    sync_batch_norm_op ${OP_MKL_DEPS} DEPS ${OP_HEADER_DEPS} ${OP_PREFETCH_DEPS})

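# warpctc_op and sync_batch_norm_op were excluded above and are registered
# here with their extra dependencies, which differ between GPU and CPU builds.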
if (WITH_GPU)
    # warpctc_op needs cuDNN 7 or above.
    if (${CUDNN_MAJOR_VERSION} VERSION_LESS 7)
        op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale SRCS warpctc_op.cc warpctc_op.cu.cc)
    else()
        op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale)
    endif()
    if (NOT WIN32)
        op_library(sync_batch_norm_op)
        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(sync_batch_norm);\n")
    endif()
else()
    op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale)
endif()

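# Collect the dependencies shared by the operator libraries.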
set(COMMON_OP_DEPS ${OP_HEADER_DEPS})

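# The optional dgc operator is registered as a CUDA-only op.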
if (WITH_DGC)
    op_library(dgc_op DEPS dgc)
    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(dgc);\n")
    set(COMMON_OP_DEPS ${COMMON_OP_DEPS} dgc)
endif()

set(COMMON_OP_DEPS ${COMMON_OP_DEPS} selected_rows_functor selected_rows lod_tensor maxouting unpooling pooling lod_rank_table context_project sequence_pooling executor device_memory_aligment)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} dynload_warpctc)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} sequence_padding sequence_scale cos_sim_functor memory jit_kernel_helper concat_and_split cross_entropy softmax vol2col im2col sampler sample_prob tree2col)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} sequence2batch lstm_compute matrix_bit_code gru_compute activation_functions beam_search fc)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} box_wrapper)
if (WITH_GPU)
  set(COMMON_OP_DEPS ${COMMON_OP_DEPS} depthwise_conv prelu)
endif()
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} device_memory_aligment)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} layer)

# FIXME(typhoonzero): operator deps may not needed.
# op_library(lod_tensor_to_array_op DEPS lod_rank_table_op)
# op_library(array_to_lod_tensor_op DEPS lod_rank_table_op)
# op_library(unsqueeze_op DEPS reshape_op)
# op_library(squeeze_op DEPS reshape_op)
# op_library(flatten_op DEPS reshape_op)
# op_library(unstack_op DEPS stack_op)
# op_library(tensor_array_to_tensor_op DEPS concat_op)

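# Cache the full operator dependency list for use by other parts of the build.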
set(OPERATOR_DEPS ${OPERATOR_DEPS} ${COMMON_OP_DEPS})
set(GLOB_OPERATOR_DEPS ${OPERATOR_DEPS} CACHE INTERNAL "Global Op dependencies")

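# Unit tests for individual operators and supporting utilities.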
cc_test(assign_op_test SRCS assign_op_test.cc DEPS assign_op)
cc_test(gather_test SRCS gather_test.cc DEPS tensor)
cc_test(scatter_test SRCS scatter_test.cc DEPS tensor math_function)
cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_tensor)
cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor memory)
cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op)
cc_test(save_load_combine_op_test SRCS save_load_combine_op_test.cc DEPS save_combine_op load_combine_op)
nv_test(dropout_op_test SRCS dropout_op_test.cc DEPS dropout_op tensor)
if (WITH_GPU)
    nv_test(test_leaky_relu_grad_grad_functor SRCS test_leaky_relu_grad_grad_functor.cc test_leaky_relu_grad_grad_functor.cu DEPS tensor device_context eigen3)
else()
    cc_test(test_leaky_relu_grad_grad_functor SRCS test_leaky_relu_grad_grad_functor.cc DEPS tensor device_context eigen3)
endif()

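# py_func_op depends on Python and pybind, so it is only built when Python support is enabled.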
if (WITH_PYTHON)
  cc_library(py_func_op SRCS py_func_op.cc DEPS op_registry python pybind)
endif()

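# Export the accumulated operator library list (OP_LIBRARY) so other modules can link against it.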
set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library")
add_subdirectory(benchmark)

cc_test(op_debug_string_test SRCS op_debug_string_test.cc DEPS elementwise_add_op)