From abc44b401542fbf84a86eb7916d31dc707c7edfa Mon Sep 17 00:00:00 2001 From: huangjiyi <43315610+huangjiyi@users.noreply.github.com> Date: Wed, 19 Apr 2023 16:19:51 +0800 Subject: [PATCH] Register fluid kernels to phi [part 11] (#53035) * update * fix bug * fix bug * fix bug * fix bug --- .../detection/sigmoid_focal_loss_op.cc | 19 ++++++++----- .../detection/sigmoid_focal_loss_op.cu | 24 ++++++++++------- .../detection/sigmoid_focal_loss_op.h | 4 +-- .../operators/fused/skip_layernorm_op.cu | 17 +++++++----- .../optimizers/sparse_momentum_op.cc | 9 ++++--- .../optimizers/sparse_momentum_op.cu | 13 +++++---- .../operators/optimizers/sparse_momentum_op.h | 2 +- paddle/fluid/operators/similarity_focus_op.cc | 9 ++++--- paddle/fluid/operators/similarity_focus_op.h | 2 +- paddle/fluid/operators/smooth_l1_loss_op.cc | 8 +++--- paddle/fluid/operators/smooth_l1_loss_op.cu | 8 +++--- paddle/fluid/operators/smooth_l1_loss_op.h | 4 +-- paddle/fluid/operators/space_to_depth_op.cc | 26 +++++++++++------- paddle/fluid/operators/space_to_depth_op.cu | 27 +++++++++++-------- paddle/fluid/operators/space_to_depth_op.h | 4 +-- paddle/fluid/operators/sparse_attention_op.cu | 27 +++++++++++-------- paddle/fluid/operators/spp_op.cc | 11 ++++---- paddle/fluid/operators/spp_op.cu.cc | 11 ++++---- paddle/fluid/operators/spp_op.h | 4 +-- 19 files changed, 132 insertions(+), 97 deletions(-) diff --git a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cc b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cc index ff27945d187..fe716adb9f2 100644 --- a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cc +++ b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cc @@ -262,10 +262,15 @@ REGISTER_OPERATOR(sigmoid_focal_loss, ops::SigmoidFocalLossGradOpMaker, ops::SigmoidFocalLossGradOpMaker); REGISTER_OPERATOR(sigmoid_focal_loss_grad, ops::SigmoidFocalLossGradOp); -REGISTER_OP_CPU_KERNEL(sigmoid_focal_loss, - ops::SigmoidFocalLossKernel, - ops::SigmoidFocalLossKernel); 
-REGISTER_OP_CPU_KERNEL( - sigmoid_focal_loss_grad, - ops::SigmoidFocalLossGradKernel, - ops::SigmoidFocalLossGradKernel); +PD_REGISTER_STRUCT_KERNEL(sigmoid_focal_loss, + CPU, + ALL_LAYOUT, + ops::SigmoidFocalLossKernel, + float, + double) {} +PD_REGISTER_STRUCT_KERNEL(sigmoid_focal_loss_grad, + CPU, + ALL_LAYOUT, + ops::SigmoidFocalLossGradKernel, + float, + double) {} diff --git a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu index 6ff2e9c65d8..5d29d52669d 100644 --- a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu +++ b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu @@ -117,7 +117,7 @@ __global__ void GPUSigmoidFocalLossBackward(const T *x_data, } } -template +template class GPUSigmoidFocalLossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { @@ -148,7 +148,7 @@ class GPUSigmoidFocalLossKernel : public framework::OpKernel { } }; -template +template class GPUSigmoidFocalLossGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { @@ -187,11 +187,15 @@ class GPUSigmoidFocalLossGradKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - sigmoid_focal_loss, - ops::GPUSigmoidFocalLossKernel, - ops::GPUSigmoidFocalLossKernel); -REGISTER_OP_CUDA_KERNEL( - sigmoid_focal_loss_grad, - ops::GPUSigmoidFocalLossGradKernel, - ops::GPUSigmoidFocalLossGradKernel); +PD_REGISTER_STRUCT_KERNEL(sigmoid_focal_loss, + GPU, + ALL_LAYOUT, + ops::GPUSigmoidFocalLossKernel, + float, + double) {} +PD_REGISTER_STRUCT_KERNEL(sigmoid_focal_loss_grad, + GPU, + ALL_LAYOUT, + ops::GPUSigmoidFocalLossGradKernel, + float, + double) {} diff --git a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.h b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.h index 0632e5ab8fa..28cac641d14 100644 --- 
a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.h +++ b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.h @@ -22,7 +22,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class SigmoidFocalLossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { @@ -73,7 +73,7 @@ class SigmoidFocalLossKernel : public framework::OpKernel { } }; -template +template class SigmoidFocalLossGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { diff --git a/paddle/fluid/operators/fused/skip_layernorm_op.cu b/paddle/fluid/operators/fused/skip_layernorm_op.cu index f6fd97f918c..a1dc6b86e04 100644 --- a/paddle/fluid/operators/fused/skip_layernorm_op.cu +++ b/paddle/fluid/operators/fused/skip_layernorm_op.cu @@ -25,7 +25,7 @@ namespace paddle { namespace operators { -template +template class SkipLayerNormKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { @@ -89,13 +89,16 @@ class SkipLayerNormKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; +namespace plat = paddle::platform; #if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 10000 -REGISTER_OP_CUDA_KERNEL( - skip_layernorm, - ops::SkipLayerNormKernel, - ops::SkipLayerNormKernel); +PD_REGISTER_STRUCT_KERNEL(skip_layernorm, + GPU, + ALL_LAYOUT, + ops::SkipLayerNormKernel, + float, + plat::float16) {} #else -REGISTER_OP_CUDA_KERNEL(skip_layernorm, - ops::SkipLayerNormKernel); +PD_REGISTER_STRUCT_KERNEL( + skip_layernorm, GPU, ALL_LAYOUT, ops::SkipLayerNormKernel, float) {} #endif diff --git a/paddle/fluid/operators/optimizers/sparse_momentum_op.cc b/paddle/fluid/operators/optimizers/sparse_momentum_op.cc index f59171e3ae7..c9f9181664e 100644 --- a/paddle/fluid/operators/optimizers/sparse_momentum_op.cc +++ 
b/paddle/fluid/operators/optimizers/sparse_momentum_op.cc @@ -118,6 +118,9 @@ REGISTER_OPERATOR( paddle::framework::EmptyGradOpMaker, paddle::framework::EmptyGradOpMaker, ops::SparseMomentumOpInferVarType); -REGISTER_OP_CPU_KERNEL(sparse_momentum, - ops::SparseMomentumOpKernel, - ops::SparseMomentumOpKernel); +PD_REGISTER_STRUCT_KERNEL(sparse_momentum, + CPU, + ALL_LAYOUT, + ops::SparseMomentumOpKernel, + float, + double) {} diff --git a/paddle/fluid/operators/optimizers/sparse_momentum_op.cu b/paddle/fluid/operators/optimizers/sparse_momentum_op.cu index d8f8e9749b8..a0df85e1453 100644 --- a/paddle/fluid/operators/optimizers/sparse_momentum_op.cu +++ b/paddle/fluid/operators/optimizers/sparse_momentum_op.cu @@ -17,8 +17,11 @@ #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - sparse_momentum, - ops::SparseMomentumOpKernel, - ops::SparseMomentumOpKernel, - ops::SparseMomentumOpKernel); +namespace plat = paddle::platform; +PD_REGISTER_STRUCT_KERNEL(sparse_momentum, + GPU, + ALL_LAYOUT, + ops::SparseMomentumOpKernel, + float, + double, + plat::float16) {} diff --git a/paddle/fluid/operators/optimizers/sparse_momentum_op.h b/paddle/fluid/operators/optimizers/sparse_momentum_op.h index 7ea3b29cfad..8de0afcd6c3 100644 --- a/paddle/fluid/operators/optimizers/sparse_momentum_op.h +++ b/paddle/fluid/operators/optimizers/sparse_momentum_op.h @@ -295,7 +295,7 @@ class IndexMomentumFunctor { } }; -template +template class SparseMomentumOpKernel : public framework::OpKernel { using MPDType = MultiPrecisionType; diff --git a/paddle/fluid/operators/similarity_focus_op.cc b/paddle/fluid/operators/similarity_focus_op.cc index 536e878c6fc..4508459f255 100644 --- a/paddle/fluid/operators/similarity_focus_op.cc +++ b/paddle/fluid/operators/similarity_focus_op.cc @@ -91,6 +91,9 @@ REGISTER_OPERATOR( ops::SimilarityFocusOpMaker, paddle::framework::EmptyGradOpMaker, paddle::framework::EmptyGradOpMaker); 
-REGISTER_OP_CPU_KERNEL(similarity_focus, - ops::SimilarityFocusKernel, - ops::SimilarityFocusKernel); +PD_REGISTER_STRUCT_KERNEL(similarity_focus, + CPU, + ALL_LAYOUT, + ops::SimilarityFocusKernel, + float, + double) {} diff --git a/paddle/fluid/operators/similarity_focus_op.h b/paddle/fluid/operators/similarity_focus_op.h index e706da9e014..32349e95703 100644 --- a/paddle/fluid/operators/similarity_focus_op.h +++ b/paddle/fluid/operators/similarity_focus_op.h @@ -25,7 +25,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class SimilarityFocusKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc index f8bebe331d8..c1abfcb3e43 100644 --- a/paddle/fluid/operators/smooth_l1_loss_op.cc +++ b/paddle/fluid/operators/smooth_l1_loss_op.cc @@ -225,7 +225,7 @@ REGISTER_OPERATOR(smooth_l1_loss, ops::SmoothL1LossGradMaker, ops::SmoothL1LossGradMaker); REGISTER_OPERATOR(smooth_l1_loss_grad, ops::SmoothL1LossGradOp); -REGISTER_OP_CPU_KERNEL(smooth_l1_loss, - ops::SmoothL1LossKernel); -REGISTER_OP_CPU_KERNEL(smooth_l1_loss_grad, - ops::SmoothL1LossGradKernel); +PD_REGISTER_STRUCT_KERNEL( + smooth_l1_loss, CPU, ALL_LAYOUT, ops::SmoothL1LossKernel, float) {} +PD_REGISTER_STRUCT_KERNEL( + smooth_l1_loss_grad, CPU, ALL_LAYOUT, ops::SmoothL1LossGradKernel, float) {} diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cu b/paddle/fluid/operators/smooth_l1_loss_op.cu index d57b96d0ec5..31d528855cc 100644 --- a/paddle/fluid/operators/smooth_l1_loss_op.cu +++ b/paddle/fluid/operators/smooth_l1_loss_op.cu @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/smooth_l1_loss_op.h" namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(smooth_l1_loss, - ops::SmoothL1LossKernel); -REGISTER_OP_CUDA_KERNEL(smooth_l1_loss_grad, - ops::SmoothL1LossGradKernel); +PD_REGISTER_STRUCT_KERNEL( + smooth_l1_loss, GPU, ALL_LAYOUT, ops::SmoothL1LossKernel, float) {} +PD_REGISTER_STRUCT_KERNEL( + smooth_l1_loss_grad, GPU, ALL_LAYOUT, ops::SmoothL1LossGradKernel, float) {} diff --git a/paddle/fluid/operators/smooth_l1_loss_op.h b/paddle/fluid/operators/smooth_l1_loss_op.h index e11f629d86d..bc57087d931 100644 --- a/paddle/fluid/operators/smooth_l1_loss_op.h +++ b/paddle/fluid/operators/smooth_l1_loss_op.h @@ -45,7 +45,7 @@ struct SmoothL1LossForward { T sigma2; }; -template +template class SmoothL1LossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -116,7 +116,7 @@ struct SmoothL1LossBackward { T sigma2; }; -template +template class SmoothL1LossGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { diff --git a/paddle/fluid/operators/space_to_depth_op.cc b/paddle/fluid/operators/space_to_depth_op.cc index ed9c82c34fe..da0c2e4a3cb 100644 --- a/paddle/fluid/operators/space_to_depth_op.cc +++ b/paddle/fluid/operators/space_to_depth_op.cc @@ -224,13 +224,19 @@ REGISTER_OPERATOR(space_to_depth, REGISTER_OPERATOR(space_to_depth_grad, ops::SpaceToDepthGradOp, ops::SpaceToDepthGradOpNoBufferVarsInferer); -REGISTER_OP_CPU_KERNEL(space_to_depth, - ops::SpaceToDepthKernel, - ops::SpaceToDepthKernel, - ops::SpaceToDepthKernel, - ops::SpaceToDepthKernel); -REGISTER_OP_CPU_KERNEL(space_to_depth_grad, - ops::SpaceToDepthGradKernel, - ops::SpaceToDepthGradKernel, - ops::SpaceToDepthGradKernel, - ops::SpaceToDepthGradKernel); +PD_REGISTER_STRUCT_KERNEL(space_to_depth, + CPU, + ALL_LAYOUT, + ops::SpaceToDepthKernel, + int, + int64_t, + float, + double) {} 
+PD_REGISTER_STRUCT_KERNEL(space_to_depth_grad, + CPU, + ALL_LAYOUT, + ops::SpaceToDepthGradKernel, + int, + int64_t, + float, + double) {} diff --git a/paddle/fluid/operators/space_to_depth_op.cu b/paddle/fluid/operators/space_to_depth_op.cu index f9df5a5f74b..7f62509ee7d 100644 --- a/paddle/fluid/operators/space_to_depth_op.cu +++ b/paddle/fluid/operators/space_to_depth_op.cu @@ -17,14 +17,19 @@ namespace plat = paddle::platform; namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(space_to_depth, - ops::SpaceToDepthKernel, - ops::SpaceToDepthKernel, - ops::SpaceToDepthKernel, - ops::SpaceToDepthKernel); - -REGISTER_OP_CUDA_KERNEL(space_to_depth_grad, - ops::SpaceToDepthGradKernel, - ops::SpaceToDepthGradKernel, - ops::SpaceToDepthGradKernel, - ops::SpaceToDepthGradKernel); +PD_REGISTER_STRUCT_KERNEL(space_to_depth, + GPU, + ALL_LAYOUT, + ops::SpaceToDepthKernel, + int, + int64_t, + float, + double) {} +PD_REGISTER_STRUCT_KERNEL(space_to_depth_grad, + GPU, + ALL_LAYOUT, + ops::SpaceToDepthGradKernel, + int, + int64_t, + float, + double) {} diff --git a/paddle/fluid/operators/space_to_depth_op.h b/paddle/fluid/operators/space_to_depth_op.h index f700841670f..18ff67c6132 100644 --- a/paddle/fluid/operators/space_to_depth_op.h +++ b/paddle/fluid/operators/space_to_depth_op.h @@ -67,7 +67,7 @@ class space_to_depth_compute { T *out_; }; -template +template class SpaceToDepthKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { @@ -96,7 +96,7 @@ class SpaceToDepthKernel : public framework::OpKernel { } }; -template +template class SpaceToDepthGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { diff --git a/paddle/fluid/operators/sparse_attention_op.cu b/paddle/fluid/operators/sparse_attention_op.cu index c0ad3c9a578..086de1fd706 100644 --- a/paddle/fluid/operators/sparse_attention_op.cu +++ 
b/paddle/fluid/operators/sparse_attention_op.cu @@ -653,7 +653,7 @@ std::vector GetSplitTensor(phi::DenseTensor* input) { return input->Split(1, 0); } -template +template class SparseAttentionCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -773,7 +773,7 @@ class SparseAttentionCUDAKernel : public framework::OpKernel { } }; -template +template class SparseAttentionGradCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -890,12 +890,17 @@ class SparseAttentionGradCUDAKernel : public framework::OpKernel { } // namespace operators } // namespace paddle -REGISTER_OP_CUDA_KERNEL( - sparse_attention, - ops::SparseAttentionCUDAKernel, - ops::SparseAttentionCUDAKernel); - -REGISTER_OP_CUDA_KERNEL( - sparse_attention_grad, - ops::SparseAttentionGradCUDAKernel, - ops::SparseAttentionGradCUDAKernel); + +namespace ops = paddle::operators; +PD_REGISTER_STRUCT_KERNEL(sparse_attention, + GPU, + ALL_LAYOUT, + ops::SparseAttentionCUDAKernel, + float, + double) {} +PD_REGISTER_STRUCT_KERNEL(sparse_attention_grad, + GPU, + ALL_LAYOUT, + ops::SparseAttentionGradCUDAKernel, + float, + double) {} diff --git a/paddle/fluid/operators/spp_op.cc b/paddle/fluid/operators/spp_op.cc index b1ca67f5218..249fc63a300 100644 --- a/paddle/fluid/operators/spp_op.cc +++ b/paddle/fluid/operators/spp_op.cc @@ -109,9 +109,8 @@ REGISTER_OPERATOR( paddle::framework::DefaultGradOpMaker, paddle::framework::DefaultGradOpMaker); REGISTER_OPERATOR(spp_grad, ops::SppOpGrad); -REGISTER_OP_CPU_KERNEL(spp, - ops::SppKernel, - ops::SppKernel); -REGISTER_OP_CPU_KERNEL(spp_grad, - ops::SppGradKernel, - ops::SppGradKernel); + +PD_REGISTER_STRUCT_KERNEL(spp, CPU, ALL_LAYOUT, ops::SppKernel, float, double) { +} +PD_REGISTER_STRUCT_KERNEL( + spp_grad, CPU, ALL_LAYOUT, ops::SppGradKernel, float, double) {} diff --git a/paddle/fluid/operators/spp_op.cu.cc 
b/paddle/fluid/operators/spp_op.cu.cc index 24f4d65f661..b41fa8ae5fc 100644 --- a/paddle/fluid/operators/spp_op.cu.cc +++ b/paddle/fluid/operators/spp_op.cu.cc @@ -15,9 +15,8 @@ limitations under the License. */ #include "paddle/fluid/operators/spp_op.h" namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(spp, - ops::SppKernel, - ops::SppKernel); -REGISTER_OP_CUDA_KERNEL(spp_grad, - ops::SppGradKernel, - ops::SppGradKernel); + +PD_REGISTER_STRUCT_KERNEL(spp, GPU, ALL_LAYOUT, ops::SppKernel, float, double) { +} +PD_REGISTER_STRUCT_KERNEL( + spp_grad, GPU, ALL_LAYOUT, ops::SppGradKernel, float, double) {} diff --git a/paddle/fluid/operators/spp_op.h b/paddle/fluid/operators/spp_op.h index 0b5c3f91ae1..bf810e88255 100644 --- a/paddle/fluid/operators/spp_op.h +++ b/paddle/fluid/operators/spp_op.h @@ -24,7 +24,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class SppKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -107,7 +107,7 @@ class SppKernel : public framework::OpKernel { } } }; -template +template class SppGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { -- GitLab