diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt
index 652845e1aa661cb7d17bda88f50ebf5877d90d5c..6257983b7666e08252d6b77388ba80d07ce285c4 100644
--- a/paddle/fluid/operators/CMakeLists.txt
+++ b/paddle/fluid/operators/CMakeLists.txt
@@ -57,8 +57,8 @@ if(WITH_COVERAGE OR NOT WITH_AVX OR WIN32)
     SET(OP_MKL_DEPS ${OP_MKL_DEPS} pyramid_hash_op)
 endif()
 
-register_operators(EXCLUDES py_func_op warpctc_op dgc_op conv_fusion_op
-        sync_batch_norm_op multihead_matmul_op ${OP_MKL_DEPS} DEPS ${OP_HEADER_DEPS} ${OP_PREFETCH_DEPS})
+register_operators(EXCLUDES py_func_op warpctc_op dgc_op
+        sync_batch_norm_op ${OP_MKL_DEPS} DEPS ${OP_HEADER_DEPS} ${OP_PREFETCH_DEPS})
 
 if (WITH_GPU)
     # warpctc_op needs cudnn 7 above
@@ -67,17 +67,10 @@ if (WITH_GPU)
     else()
         op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale)
     endif()
-    # conv_fusion_op needs cudnn 7 above
-    if (NOT ${CUDNN_VERSION} VERSION_LESS 7100)
-        op_library(conv_fusion_op)
-        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_fusion);\n")
-    endif()
     if (NOT WIN32)
         op_library(sync_batch_norm_op)
         file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(sync_batch_norm);\n")
     endif()
-    op_library(multihead_matmul_op)
-    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(multihead_matmul);\n")
 else()
     op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale)
 endif()
diff --git a/paddle/fluid/operators/fused/CMakeLists.txt b/paddle/fluid/operators/fused/CMakeLists.txt
index a31531c599a71e7da0697825a12ab86f5d809a51..42529f02920f91f55ef846108710292af93e1e01 100644
--- a/paddle/fluid/operators/fused/CMakeLists.txt
+++ b/paddle/fluid/operators/fused/CMakeLists.txt
@@ -1,12 +1,29 @@
 include(operators)
-register_operators(EXCLUDES fusion_transpose_flatten_concat_op fusion_conv_inception_op fused_fc_elementwise_layernorm_op)
+register_operators(EXCLUDES
+        conv_fusion_op
+        fusion_transpose_flatten_concat_op
+        fusion_conv_inception_op
+        fused_fc_elementwise_layernorm_op
+        multihead_matmul_op)
+
 if (WITH_GPU)
-    op_library(fusion_transpose_flatten_concat_op)
-    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fusion_transpose_flatten_concat);\n")
-    if (NOT ${CUDNN_VERSION} VERSION_LESS 7100)
-        op_library(fusion_conv_inception_op)
-        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_inception_fusion);\n")
-    endif()
-    op_library(fused_fc_elementwise_layernorm_op)
-    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_fc_elementwise_layernorm);\n")
+    # conv_fusion_op needs cudnn 7 above
+    if (NOT ${CUDNN_VERSION} VERSION_LESS 7100)
+        op_library(conv_fusion_op)
+        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_fusion);\n")
+    endif()
+    # fusion_transpose_flatten_concat_op
+    op_library(fusion_transpose_flatten_concat_op)
+    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fusion_transpose_flatten_concat);\n")
+    # fusion_conv_inception_op needs cudnn 7 above
+    if (NOT ${CUDNN_VERSION} VERSION_LESS 7100)
+        op_library(fusion_conv_inception_op)
+        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_inception_fusion);\n")
+    endif()
+    # fused_fc_elementwise_layernorm_op
+    op_library(fused_fc_elementwise_layernorm_op)
+    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_fc_elementwise_layernorm);\n")
+    # multihead_matmul_op
+    op_library(multihead_matmul_op)
+    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(multihead_matmul);\n")
 endif()
diff --git a/paddle/fluid/operators/conv_fusion_op.cc b/paddle/fluid/operators/fused/conv_fusion_op.cc
similarity index 88%
rename from paddle/fluid/operators/conv_fusion_op.cc
rename to paddle/fluid/operators/fused/conv_fusion_op.cc
index 23b8087e781da30ed7b66ba651f8071ecb7aaf50..dd74d278111002dfee7a77b06c7126cceaa2afc1 100644
--- a/paddle/fluid/operators/conv_fusion_op.cc
+++ b/paddle/fluid/operators/fused/conv_fusion_op.cc
@@ -62,10 +62,10 @@ class Conv2DFusionOpMaker : public Conv2DOpMaker {
 class Conv2DFusionOpInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Input"),
-                   "Input(Input) of ConvOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Filter"),
-                   "Input(Filter) of ConvOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true,
+                      "Input(Input) of ConvOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Filter"), true,
+                      "Input(Filter) of ConvOp should not be null.");
 
     auto in_dims = ctx->GetInputDim("Input");
     auto filter_dims = ctx->GetInputDim("Filter");
@@ -79,14 +79,14 @@ class Conv2DFusionOpInferShape : public framework::InferShapeBase {
       oshape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
                                       dilations[i], paddings[i], strides[i]));
     }
-    PADDLE_ENFORCE(ctx->HasOutput("Output"),
-                   "Output(Output) of ConvOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Output"), true,
+                      "Output(Output) of ConvOp should not be null.");
     ctx->SetOutputDim("Output", framework::make_ddim(oshape));
     std::vector<int> channels =
         ctx->Attrs().Get<std::vector<int>>("split_channels");
     if (channels.size()) {
-      PADDLE_ENFORCE(ctx->HasOutputs("Outputs"),
-                     "Output(Outputs) of ConvOp should not be null.");
+      PADDLE_ENFORCE_EQ(ctx->HasOutputs("Outputs"), true,
+                        "Output(Outputs) of ConvOp should not be null.");
       std::vector<framework::DDim> oshapes;
       oshapes.reserve(channels.size());
       for (size_t i = 0; i < channels.size(); ++i) {
diff --git a/paddle/fluid/operators/conv_fusion_op.cu.cc b/paddle/fluid/operators/fused/conv_fusion_op.cu.cc
similarity index 99%
rename from paddle/fluid/operators/conv_fusion_op.cu.cc
rename to paddle/fluid/operators/fused/conv_fusion_op.cu.cc
index 566daa6608282d89a92418e2c168bdf2c09c65c1..0e2f3e1d8869f46f36c2bd90130a21df81f054e7 100644
--- a/paddle/fluid/operators/conv_fusion_op.cu.cc
+++ b/paddle/fluid/operators/fused/conv_fusion_op.cu.cc
@@ -41,7 +41,7 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel<T> {
     auto* input = ctx.Input<Tensor>("Input");
     auto* filter = ctx.Input<Tensor>("Filter");
     auto* bias = ctx.Input<Tensor>("Bias");
-    PADDLE_ENFORCE(bias, "The bias should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(bias, "The bias should not be null.");
     auto* residual = ctx.Input<Tensor>("ResidualData");
     auto* output = ctx.Output<Tensor>("Output");
 
diff --git a/paddle/fluid/operators/multihead_matmul_op.cc b/paddle/fluid/operators/fused/multihead_matmul_op.cc
similarity index 100%
rename from paddle/fluid/operators/multihead_matmul_op.cc
rename to paddle/fluid/operators/fused/multihead_matmul_op.cc
diff --git a/paddle/fluid/operators/multihead_matmul_op.cu b/paddle/fluid/operators/fused/multihead_matmul_op.cu
similarity index 100%
rename from paddle/fluid/operators/multihead_matmul_op.cu
rename to paddle/fluid/operators/fused/multihead_matmul_op.cu