From 03ba0fdae685e6e807bc78c605ef5198b036390d Mon Sep 17 00:00:00 2001
From: Yiqun Liu
Date: Wed, 30 Oct 2019 14:27:47 +0800
Subject: [PATCH] Move the codes of fused operators to operators/fused
 directory. (#20881)

* Move the codes of fused operators to operators/fused directory.
test=develop

* Correct the op name in cmake.

* Change the use of PADDLE_ENFORCE.
test=develop
---
 paddle/fluid/operators/CMakeLists.txt       | 11 ++----
 paddle/fluid/operators/fused/CMakeLists.txt | 35 ++++++++++++++-----
 .../operators/{ => fused}/conv_fusion_op.cc | 16 ++++-----
 .../{ => fused}/conv_fusion_op.cu.cc        |  2 +-
 .../{ => fused}/multihead_matmul_op.cc      |  0
 .../{ => fused}/multihead_matmul_op.cu      |  0
 6 files changed, 37 insertions(+), 27 deletions(-)
 rename paddle/fluid/operators/{ => fused}/conv_fusion_op.cc (88%)
 rename paddle/fluid/operators/{ => fused}/conv_fusion_op.cu.cc (99%)
 rename paddle/fluid/operators/{ => fused}/multihead_matmul_op.cc (100%)
 rename paddle/fluid/operators/{ => fused}/multihead_matmul_op.cu (100%)

diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt
index 652845e1aa..6257983b76 100644
--- a/paddle/fluid/operators/CMakeLists.txt
+++ b/paddle/fluid/operators/CMakeLists.txt
@@ -57,8 +57,8 @@ if(WITH_COVERAGE OR NOT WITH_AVX OR WIN32)
     SET(OP_MKL_DEPS ${OP_MKL_DEPS} pyramid_hash_op)
 endif()

-register_operators(EXCLUDES py_func_op warpctc_op dgc_op conv_fusion_op
-    sync_batch_norm_op multihead_matmul_op ${OP_MKL_DEPS} DEPS ${OP_HEADER_DEPS} ${OP_PREFETCH_DEPS})
+register_operators(EXCLUDES py_func_op warpctc_op dgc_op
+    sync_batch_norm_op ${OP_MKL_DEPS} DEPS ${OP_HEADER_DEPS} ${OP_PREFETCH_DEPS})

 if (WITH_GPU)
     # warpctc_op needs cudnn 7 above
@@ -67,17 +67,10 @@ if (WITH_GPU)
     else()
         op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale)
     endif()
-    # conv_fusion_op needs cudnn 7 above
-    if (NOT ${CUDNN_VERSION} VERSION_LESS 7100)
-        op_library(conv_fusion_op)
-        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_fusion);\n")
-    endif()
     if (NOT WIN32)
         op_library(sync_batch_norm_op)
         file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(sync_batch_norm);\n")
     endif()
-    op_library(multihead_matmul_op)
-    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(multihead_matmul);\n")
 else()
     op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale)
 endif()
diff --git a/paddle/fluid/operators/fused/CMakeLists.txt b/paddle/fluid/operators/fused/CMakeLists.txt
index a31531c599..42529f0292 100644
--- a/paddle/fluid/operators/fused/CMakeLists.txt
+++ b/paddle/fluid/operators/fused/CMakeLists.txt
@@ -1,12 +1,29 @@
 include(operators)
-register_operators(EXCLUDES fusion_transpose_flatten_concat_op fusion_conv_inception_op fused_fc_elementwise_layernorm_op)
+register_operators(EXCLUDES
+    conv_fusion_op
+    fusion_transpose_flatten_concat_op
+    fusion_conv_inception_op
+    fused_fc_elementwise_layernorm_op
+    multihead_matmul_op)
+
 if (WITH_GPU)
-    op_library(fusion_transpose_flatten_concat_op)
-    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fusion_transpose_flatten_concat);\n")
-    if (NOT ${CUDNN_VERSION} VERSION_LESS 7100)
-        op_library(fusion_conv_inception_op)
-        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_inception_fusion);\n")
-    endif()
-    op_library(fused_fc_elementwise_layernorm_op)
-    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_fc_elementwise_layernorm);\n")
+    # conv_fusion_op needs cudnn 7 above
+    if (NOT ${CUDNN_VERSION} VERSION_LESS 7100)
+        op_library(conv_fusion_op)
+        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_fusion);\n")
+    endif()
+    # fusion_transpose_flatten_concat_op
+    op_library(fusion_transpose_flatten_concat_op)
+    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fusion_transpose_flatten_concat);\n")
+    # fusion_conv_inception_op needs cudnn 7 above
+    if (NOT ${CUDNN_VERSION} VERSION_LESS 7100)
+        op_library(fusion_conv_inception_op)
+        file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_inception_fusion);\n")
+    endif()
+    # fused_fc_elementwise_layernorm_op
+    op_library(fused_fc_elementwise_layernorm_op)
+    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_fc_elementwise_layernorm);\n")
+    # multihead_matmul_op
+    op_library(multihead_matmul_op)
+    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(multihead_matmul);\n")
 endif()
diff --git a/paddle/fluid/operators/conv_fusion_op.cc b/paddle/fluid/operators/fused/conv_fusion_op.cc
similarity index 88%
rename from paddle/fluid/operators/conv_fusion_op.cc
rename to paddle/fluid/operators/fused/conv_fusion_op.cc
index 23b8087e78..dd74d27811 100644
--- a/paddle/fluid/operators/conv_fusion_op.cc
+++ b/paddle/fluid/operators/fused/conv_fusion_op.cc
@@ -62,10 +62,10 @@ class Conv2DFusionOpMaker : public Conv2DOpMaker {
 class Conv2DFusionOpInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Input"),
-                   "Input(Input) of ConvOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Filter"),
-                   "Input(Filter) of ConvOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true,
+                      "Input(Input) of ConvOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Filter"), true,
+                      "Input(Filter) of ConvOp should not be null.");
     auto in_dims = ctx->GetInputDim("Input");
     auto filter_dims = ctx->GetInputDim("Filter");

@@ -79,14 +79,14 @@ class Conv2DFusionOpInferShape : public framework::InferShapeBase {
       oshape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
                                       dilations[i], paddings[i], strides[i]));
     }
-    PADDLE_ENFORCE(ctx->HasOutput("Output"),
-                   "Output(Output) of ConvOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Output"), true,
+                      "Output(Output) of ConvOp should not be null.");
     ctx->SetOutputDim("Output", framework::make_ddim(oshape));
     std::vector<int> channels =
         ctx->Attrs().Get<std::vector<int>>("split_channels");
     if (channels.size()) {
-      PADDLE_ENFORCE(ctx->HasOutputs("Outputs"),
-                     "Output(Outputs) of ConvOp should not be null.");
+      PADDLE_ENFORCE_EQ(ctx->HasOutputs("Outputs"), true,
+                        "Output(Outputs) of ConvOp should not be null.");
       std::vector<framework::DDim> oshapes;
       oshapes.reserve(channels.size());
       for (size_t i = 0; i < channels.size(); ++i) {
diff --git a/paddle/fluid/operators/conv_fusion_op.cu.cc b/paddle/fluid/operators/fused/conv_fusion_op.cu.cc
similarity index 99%
rename from paddle/fluid/operators/conv_fusion_op.cu.cc
rename to paddle/fluid/operators/fused/conv_fusion_op.cu.cc
index 566daa6608..0e2f3e1d88 100644
--- a/paddle/fluid/operators/conv_fusion_op.cu.cc
+++ b/paddle/fluid/operators/fused/conv_fusion_op.cu.cc
@@ -41,7 +41,7 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel<T> {
     auto* input = ctx.Input<Tensor>("Input");
     auto* filter = ctx.Input<Tensor>("Filter");
     auto* bias = ctx.Input<Tensor>("Bias");
-    PADDLE_ENFORCE(bias, "The bias should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(bias, "The bias should not be null.");
     auto* residual = ctx.Input<Tensor>("ResidualData");
     auto* output = ctx.Output<Tensor>("Output");
diff --git a/paddle/fluid/operators/multihead_matmul_op.cc b/paddle/fluid/operators/fused/multihead_matmul_op.cc
similarity index 100%
rename from paddle/fluid/operators/multihead_matmul_op.cc
rename to paddle/fluid/operators/fused/multihead_matmul_op.cc
diff --git a/paddle/fluid/operators/multihead_matmul_op.cu b/paddle/fluid/operators/fused/multihead_matmul_op.cu
similarity index 100%
rename from paddle/fluid/operators/multihead_matmul_op.cu
rename to paddle/fluid/operators/fused/multihead_matmul_op.cu
--
GitLab