From 2944d3c01d760f7c8fb2e8072fc98c0ca34c3fa0 Mon Sep 17 00:00:00 2001 From: huangjiyi <43315610+huangjiyi@users.noreply.github.com> Date: Wed, 19 Apr 2023 11:36:12 +0800 Subject: [PATCH] update (#53036) --- .../operators/detection/target_assign_op.cc | 6 ++-- .../operators/detection/target_assign_op.cu | 6 ++-- .../operators/detection/target_assign_op.h | 2 +- .../fluid/operators/squared_l2_distance_op.cc | 14 +++++---- .../fluid/operators/squared_l2_distance_op.cu | 13 ++++---- .../fluid/operators/squared_l2_distance_op.h | 4 +-- paddle/fluid/operators/tdm_child_op.cc | 15 ++++++---- paddle/fluid/operators/tdm_child_op.h | 2 +- paddle/fluid/operators/tdm_sampler_op.cc | 15 ++++++---- paddle/fluid/operators/tdm_sampler_op.h | 2 +- .../teacher_student_sigmoid_loss_op.cc | 19 +++++++----- .../teacher_student_sigmoid_loss_op.h | 4 +-- paddle/fluid/operators/temporal_shift_op.cu | 30 +++++++++++-------- paddle/fluid/operators/tree_conv_op.cc | 11 +++---- paddle/fluid/operators/tree_conv_op.cu | 11 ++++--- paddle/fluid/operators/tree_conv_op.h | 4 +-- .../fluid/operators/unique_with_counts_op.cc | 13 ++++---- .../fluid/operators/unique_with_counts_op.h | 2 +- paddle/fluid/operators/var_conv_2d_op.cc | 16 ++++------ 19 files changed, 104 insertions(+), 85 deletions(-) diff --git a/paddle/fluid/operators/detection/target_assign_op.cc b/paddle/fluid/operators/detection/target_assign_op.cc index 155ec31fa92..437b46c459f 100644 --- a/paddle/fluid/operators/detection/target_assign_op.cc +++ b/paddle/fluid/operators/detection/target_assign_op.cc @@ -186,6 +186,6 @@ REGISTER_OPERATOR( ops::TargetAssignOpMaker, paddle::framework::EmptyGradOpMaker, paddle::framework::EmptyGradOpMaker); -REGISTER_OP_CPU_KERNEL(target_assign, - ops::TargetAssignKernel, - ops::TargetAssignKernel); + +PD_REGISTER_STRUCT_KERNEL( + target_assign, CPU, ALL_LAYOUT, ops::TargetAssignKernel, int, float) {} diff --git a/paddle/fluid/operators/detection/target_assign_op.cu b/paddle/fluid/operators/detection/target_assign_op.cu index 337f55a3ca8..951fcdbafae 100644 --- a/paddle/fluid/operators/detection/target_assign_op.cu +++ b/paddle/fluid/operators/detection/target_assign_op.cu @@ -65,6 +65,6 @@ template struct NegTargetAssignFunctor; } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(target_assign, - ops::TargetAssignKernel, - ops::TargetAssignKernel); + +PD_REGISTER_STRUCT_KERNEL( + target_assign, GPU, ALL_LAYOUT, ops::TargetAssignKernel, int, float) {} diff --git a/paddle/fluid/operators/detection/target_assign_op.h b/paddle/fluid/operators/detection/target_assign_op.h index 3319dffd226..484bd8454ba 100644 --- a/paddle/fluid/operators/detection/target_assign_op.h +++ b/paddle/fluid/operators/detection/target_assign_op.h @@ -92,7 +92,7 @@ struct NegTargetAssignFunctor { WT* out_wt) const; }; -template +template class TargetAssignKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { diff --git a/paddle/fluid/operators/squared_l2_distance_op.cc b/paddle/fluid/operators/squared_l2_distance_op.cc index f1ed2d3ee68..0f2f727dd91 100644 --- a/paddle/fluid/operators/squared_l2_distance_op.cc +++ b/paddle/fluid/operators/squared_l2_distance_op.cc @@ -221,8 +221,12 @@ REGISTER_OPERATOR( REGISTER_OPERATOR(squared_l2_distance_grad, ops::SquaredL2DistanceGradOp, ops::SquaredL2DistanceGradOpNoBufferVarsInferer); -REGISTER_OP_CPU_KERNEL(squared_l2_distance, - ops::SquaredL2DistanceKernel); -REGISTER_OP_CPU_KERNEL( - squared_l2_distance_grad, - 
ops::SquaredL2DistanceGradKernel); + +PD_REGISTER_STRUCT_KERNEL( + squared_l2_distance, CPU, ALL_LAYOUT, ops::SquaredL2DistanceKernel, float) { +} +PD_REGISTER_STRUCT_KERNEL(squared_l2_distance_grad, + CPU, + ALL_LAYOUT, + ops::SquaredL2DistanceGradKernel, + float) {} diff --git a/paddle/fluid/operators/squared_l2_distance_op.cu b/paddle/fluid/operators/squared_l2_distance_op.cu index c10cbfb42f1..4411df4d9ab 100644 --- a/paddle/fluid/operators/squared_l2_distance_op.cu +++ b/paddle/fluid/operators/squared_l2_distance_op.cu @@ -14,8 +14,11 @@ limitations under the License. */ #include "paddle/fluid/operators/squared_l2_distance_op.h" namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(squared_l2_distance, - ops::SquaredL2DistanceKernel); -REGISTER_OP_CUDA_KERNEL( - squared_l2_distance_grad, - ops::SquaredL2DistanceGradKernel); +PD_REGISTER_STRUCT_KERNEL( + squared_l2_distance, GPU, ALL_LAYOUT, ops::SquaredL2DistanceKernel, float) { +} +PD_REGISTER_STRUCT_KERNEL(squared_l2_distance_grad, + GPU, + ALL_LAYOUT, + ops::SquaredL2DistanceGradKernel, + float) {} diff --git a/paddle/fluid/operators/squared_l2_distance_op.h b/paddle/fluid/operators/squared_l2_distance_op.h index f0838c4fad2..18039835c55 100644 --- a/paddle/fluid/operators/squared_l2_distance_op.h +++ b/paddle/fluid/operators/squared_l2_distance_op.h @@ -19,7 +19,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class SquaredL2DistanceKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -60,7 +60,7 @@ class SquaredL2DistanceKernel : public framework::OpKernel { } }; -template +template class SquaredL2DistanceGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { diff --git a/paddle/fluid/operators/tdm_child_op.cc b/paddle/fluid/operators/tdm_child_op.cc index 0ec2c1e85bf..266bbb9657d 100644 --- a/paddle/fluid/operators/tdm_child_op.cc +++ b/paddle/fluid/operators/tdm_child_op.cc @@ -119,9 +119,12 @@ REGISTER_OPERATOR( ops::TDMChildOpMaker, paddle::framework::EmptyGradOpMaker, paddle::framework::EmptyGradOpMaker); -REGISTER_OP_CPU_KERNEL( - tdm_child, - ops::TDMChildKernel, - ops::TDMChildKernel, - ops::TDMChildKernel, - ops::TDMChildKernel); + +PD_REGISTER_STRUCT_KERNEL(tdm_child, + CPU, + ALL_LAYOUT, + ops::TDMChildKernel, + float, + double, + int, + int64_t) {} diff --git a/paddle/fluid/operators/tdm_child_op.h b/paddle/fluid/operators/tdm_child_op.h index 0064567887e..afd8e667b82 100644 --- a/paddle/fluid/operators/tdm_child_op.h +++ b/paddle/fluid/operators/tdm_child_op.h @@ -105,7 +105,7 @@ void TDMChildInner(const framework::ExecutionContext &context, memcpy(leaf_mask_data, &item_mask_vec[0], sizeof(OutT) * output_nums); } -template +template class TDMChildKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { diff --git a/paddle/fluid/operators/tdm_sampler_op.cc b/paddle/fluid/operators/tdm_sampler_op.cc index 66e9728d88f..cb667342f07 100644 --- a/paddle/fluid/operators/tdm_sampler_op.cc +++ b/paddle/fluid/operators/tdm_sampler_op.cc @@ -136,9 +136,12 @@ REGISTER_OPERATOR( ops::TDMSamplerOpMaker, paddle::framework::EmptyGradOpMaker, paddle::framework::EmptyGradOpMaker); -REGISTER_OP_CPU_KERNEL( - tdm_sampler, - ops::TDMSamplerKernel, - ops::TDMSamplerKernel, - ops::TDMSamplerKernel, - ops::TDMSamplerKernel); + +PD_REGISTER_STRUCT_KERNEL(tdm_sampler, + CPU, + ALL_LAYOUT, + 
ops::TDMSamplerKernel, + float, + double, + int, + int64_t) {} diff --git a/paddle/fluid/operators/tdm_sampler_op.h b/paddle/fluid/operators/tdm_sampler_op.h index e7fd36b8dcb..4baff820784 100644 --- a/paddle/fluid/operators/tdm_sampler_op.h +++ b/paddle/fluid/operators/tdm_sampler_op.h @@ -251,7 +251,7 @@ void TDMSamplerInner(const framework::ExecutionContext &context, } } -template +template class TDMSamplerKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { diff --git a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc index fdb78f9da32..ad54a49f820 100644 --- a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc +++ b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc @@ -249,10 +249,15 @@ REGISTER_OPERATOR( REGISTER_OPERATOR(teacher_student_sigmoid_loss_grad, ops::TeacherStudentSigmoidLossGradientOp); -REGISTER_OP_CPU_KERNEL(teacher_student_sigmoid_loss, - ops::TeacherStudentSigmoidLossOpKernel, - ops::TeacherStudentSigmoidLossOpKernel); - -REGISTER_OP_CPU_KERNEL(teacher_student_sigmoid_loss_grad, - ops::TeacherStudentSigmoidLossGradOpKernel, - ops::TeacherStudentSigmoidLossGradOpKernel); +PD_REGISTER_STRUCT_KERNEL(teacher_student_sigmoid_loss, + CPU, + ALL_LAYOUT, + ops::TeacherStudentSigmoidLossOpKernel, + float, + double) {} +PD_REGISTER_STRUCT_KERNEL(teacher_student_sigmoid_loss_grad, + CPU, + ALL_LAYOUT, + ops::TeacherStudentSigmoidLossGradOpKernel, + float, + double) {} diff --git a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.h b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.h index 133d9656284..7ccb9438d41 100644 --- a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.h +++ b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.h @@ -19,7 +19,7 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -template +template class TeacherStudentSigmoidLossOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -63,7 +63,7 @@ class TeacherStudentSigmoidLossOpKernel : public framework::OpKernel { } }; -template +template class TeacherStudentSigmoidLossGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { diff --git a/paddle/fluid/operators/temporal_shift_op.cu b/paddle/fluid/operators/temporal_shift_op.cu index d2583aeb143..68f8153e88e 100644 --- a/paddle/fluid/operators/temporal_shift_op.cu +++ b/paddle/fluid/operators/temporal_shift_op.cu @@ -152,7 +152,7 @@ __global__ void KeTemporalShiftBwNHWC(const T* output_grad, } } -template +template class TemporalShiftOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -208,7 +208,7 @@ class TemporalShiftOpCUDAKernel : public framework::OpKernel { } }; -template +template class TemporalShiftGradOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -267,13 +267,19 @@ class TemporalShiftGradOpCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - temporal_shift, - ops::TemporalShiftOpCUDAKernel, - ops::TemporalShiftOpCUDAKernel, - ops::TemporalShiftOpCUDAKernel); -REGISTER_OP_CUDA_KERNEL( - temporal_shift_grad, - ops::TemporalShiftGradOpCUDAKernel, - ops::TemporalShiftGradOpCUDAKernel, - ops::TemporalShiftGradOpCUDAKernel); +namespace plat = paddle::platform; + +PD_REGISTER_STRUCT_KERNEL(temporal_shift, + GPU, + ALL_LAYOUT, + ops::TemporalShiftOpCUDAKernel, + float, + double, + plat::float16) {} +PD_REGISTER_STRUCT_KERNEL(temporal_shift_grad, + GPU, + ALL_LAYOUT, + ops::TemporalShiftGradOpCUDAKernel, + float, + double, + plat::float16) {} diff --git a/paddle/fluid/operators/tree_conv_op.cc b/paddle/fluid/operators/tree_conv_op.cc index 0e78aa20faa..7265d966b9e 100644 --- a/paddle/fluid/operators/tree_conv_op.cc +++ b/paddle/fluid/operators/tree_conv_op.cc @@ -234,10 +234,7 @@ REGISTER_OPERATOR(tree_conv, REGISTER_OPERATOR(tree_conv_grad, ops::TreeConvGradOp); -REGISTER_OP_CPU_KERNEL(tree_conv, - ops::TreeConvKernel, - ops::TreeConvKernel); - -REGISTER_OP_CPU_KERNEL(tree_conv_grad, - ops::TreeConvGradKernel, - ops::TreeConvGradKernel); +PD_REGISTER_STRUCT_KERNEL( + tree_conv, CPU, ALL_LAYOUT, ops::TreeConvKernel, float, double) {} +PD_REGISTER_STRUCT_KERNEL( + tree_conv_grad, CPU, ALL_LAYOUT, ops::TreeConvGradKernel, float, double) {} diff --git a/paddle/fluid/operators/tree_conv_op.cu b/paddle/fluid/operators/tree_conv_op.cu index 1e4ca7bb838..1bfcb94013c 100644 --- a/paddle/fluid/operators/tree_conv_op.cu +++ b/paddle/fluid/operators/tree_conv_op.cu @@ -15,9 +15,8 @@ #include "paddle/fluid/operators/tree_conv_op.h" namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(tree_conv, - ops::TreeConvKernel, - ops::TreeConvKernel); -REGISTER_OP_CUDA_KERNEL(tree_conv_grad, - ops::TreeConvGradKernel, - ops::TreeConvGradKernel); + +PD_REGISTER_STRUCT_KERNEL( + tree_conv, GPU, ALL_LAYOUT, ops::TreeConvKernel, float, double) {} +PD_REGISTER_STRUCT_KERNEL( + tree_conv_grad, GPU, ALL_LAYOUT, ops::TreeConvGradKernel, float, double) {} diff --git a/paddle/fluid/operators/tree_conv_op.h b/paddle/fluid/operators/tree_conv_op.h index cab0796a710..18fd5bea29d 100644 --- 
a/paddle/fluid/operators/tree_conv_op.h +++ b/paddle/fluid/operators/tree_conv_op.h @@ -23,7 +23,7 @@ namespace paddle { namespace operators { using DDim = framework::DDim; -template +template class TreeConvKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { @@ -73,7 +73,7 @@ class TreeConvKernel : public framework::OpKernel { } } }; -template +template class TreeConvGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { diff --git a/paddle/fluid/operators/unique_with_counts_op.cc b/paddle/fluid/operators/unique_with_counts_op.cc index 3726fd978bd..5272158805d 100644 --- a/paddle/fluid/operators/unique_with_counts_op.cc +++ b/paddle/fluid/operators/unique_with_counts_op.cc @@ -75,8 +75,11 @@ namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(unique_with_counts, ops::UniqueWithCountsOp, ops::UniqueWithCountsOpMaker); -REGISTER_OP_CPU_KERNEL(unique_with_counts, - ops::UniqueWithCountsKernel, - ops::UniqueWithCountsKernel, - ops::UniqueWithCountsKernel, - ops::UniqueWithCountsKernel); +PD_REGISTER_STRUCT_KERNEL(unique_with_counts, + CPU, + ALL_LAYOUT, + ops::UniqueWithCountsKernel, + float, + double, + int32_t, + int64_t) {} diff --git a/paddle/fluid/operators/unique_with_counts_op.h b/paddle/fluid/operators/unique_with_counts_op.h index eb3cc2d4731..4b1fef5e224 100644 --- a/paddle/fluid/operators/unique_with_counts_op.h +++ b/paddle/fluid/operators/unique_with_counts_op.h @@ -25,7 +25,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class UniqueWithCountsKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { diff --git a/paddle/fluid/operators/var_conv_2d_op.cc b/paddle/fluid/operators/var_conv_2d_op.cc index f60190f00cb..112aefbe7f2 100644 --- a/paddle/fluid/operators/var_conv_2d_op.cc +++ b/paddle/fluid/operators/var_conv_2d_op.cc @@ -176,7 +176,7 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const { } } -template +template class CPUVarConv2dOPKernel : public framework::OpKernel { public: void Im2Col(const framework::ExecutionContext& ctx, @@ -392,7 +392,7 @@ void VarConv2dOpGrad::InferShape(framework::InferShapeContext* ctx) const { } } -template +template class CPUVarConv2dOPGradKernel : public framework::OpKernel { public: void Im2ColGrad(const framework::ExecutionContext& ctx, T* top_diff) const { @@ -532,11 +532,7 @@ REGISTER_OPERATOR(var_conv_2d, ops::VarConv2dGradMaker); REGISTER_OPERATOR(var_conv_2d_grad, ops::VarConv2dOpGrad); -REGISTER_OP_CPU_KERNEL(var_conv_2d, - ops::CPUVarConv2dOPKernel); -// ops::CPUVarConv2dOPKernel -REGISTER_OP_CPU_KERNEL(var_conv_2d_grad, - ops::CPUVarConv2dOPGradKernel); -// ops::CPUVarConv2dOPGradKernel +PD_REGISTER_STRUCT_KERNEL( + var_conv_2d, CPU, ALL_LAYOUT, ops::CPUVarConv2dOPKernel, float) {} +PD_REGISTER_STRUCT_KERNEL( + var_conv_2d_grad, CPU, ALL_LAYOUT, ops::CPUVarConv2dOPGradKernel, float) {} -- GitLab
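
Note on the pattern applied above: every hunk in this patch makes the same mechanical change. The per-type REGISTER_OP_CPU_KERNEL / REGISTER_OP_CUDA_KERNEL registrations are replaced by a single PD_REGISTER_STRUCT_KERNEL call that names the backend (CPU or GPU), the layout (ALL_LAYOUT), the kernel class, and the list of element types, and the kernel class template parameters are reordered so the element type T comes before DeviceContext. Below is a minimal before/after sketch, assuming the usual Paddle operator and registration headers are included; ExampleKernel and example_op are hypothetical names, and the phi::CPUContext argument shown in the old-style comment is an assumption for illustration, not text taken from this patch.

namespace paddle {
namespace operators {

// New-style struct kernel: element type first, device context second, as
// expected by PD_REGISTER_STRUCT_KERNEL.
template <typename T, typename DeviceContext>
class ExampleKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // ... kernel body ...
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// Old-style registration, one entry per instantiated type (illustrative,
// device context type assumed):
//   REGISTER_OP_CPU_KERNEL(example_op,
//                          ops::ExampleKernel<phi::CPUContext, float>,
//                          ops::ExampleKernel<phi::CPUContext, double>);

// New-style registration: backend and layout are spelled out once, the element
// types are listed at the end, and the macro is followed by an empty body.
PD_REGISTER_STRUCT_KERNEL(
    example_op, CPU, ALL_LAYOUT, ops::ExampleKernel, float, double) {}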