From 37489df57f42f882c8fbc8bbe2f9edbf6ef3298c Mon Sep 17 00:00:00 2001
From: huangjiyi <43315610+huangjiyi@users.noreply.github.com>
Date: Wed, 26 Apr 2023 10:40:07 +0800
Subject: [PATCH] Register fluid xpu kernels to phi [part 3] (#53189)

* update

* update
---
 .../fluid/operators/affine_channel_op_xpu.cc  | 16 ++++++++------
 .../fused/resnet_basic_block_op_xpu.cc        | 18 ++++++++++-----
 .../operators/fused/resnet_unit_op_xpu.cc     | 22 ++++++++++++-------
 paddle/fluid/operators/sampling_id_op_xpu.cc  |  6 ++---
 .../sequence_ops/sequence_conv_op_xpu.cc      | 18 +++++++--------
 .../sequence_ops/sequence_unpad_op_xpu.cc     |  4 ++--
 .../uniform_random_inplace_op_xpu.cc          | 19 ++++++++++------
 7 files changed, 60 insertions(+), 43 deletions(-)

diff --git a/paddle/fluid/operators/affine_channel_op_xpu.cc b/paddle/fluid/operators/affine_channel_op_xpu.cc
index 2649a9190b8..7a4de54954d 100644
--- a/paddle/fluid/operators/affine_channel_op_xpu.cc
+++ b/paddle/fluid/operators/affine_channel_op_xpu.cc
@@ -25,7 +25,7 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-template <typename DeviceContext, typename T>
+template <typename T, typename DeviceContext>
 class AffineChannelXPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
@@ -85,7 +85,7 @@ class AffineChannelXPUKernel : public framework::OpKernel<T> {
   }
 };
 
-template <typename DeviceContext, typename T>
+template <typename T, typename DeviceContext>
 class AffineChannelGradXPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
@@ -189,10 +189,12 @@ class AffineChannelGradXPUKernel : public framework::OpKernel<T> {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-using XPU = paddle::platform::XPUDeviceContext;
-
-REGISTER_OP_XPU_KERNEL(affine_channel, ops::AffineChannelXPUKernel<XPU, float>);
-REGISTER_OP_XPU_KERNEL(affine_channel_grad,
-                       ops::AffineChannelGradXPUKernel<XPU, float>);
+PD_REGISTER_STRUCT_KERNEL(
+    affine_channel, XPU, ALL_LAYOUT, ops::AffineChannelXPUKernel, float) {}
+PD_REGISTER_STRUCT_KERNEL(affine_channel_grad,
+                          XPU,
+                          ALL_LAYOUT,
+                          ops::AffineChannelGradXPUKernel,
+                          float) {}
 
 #endif
diff --git a/paddle/fluid/operators/fused/resnet_basic_block_op_xpu.cc b/paddle/fluid/operators/fused/resnet_basic_block_op_xpu.cc
index f6b2d30453f..4d026f4b780 100644
--- a/paddle/fluid/operators/fused/resnet_basic_block_op_xpu.cc
+++ b/paddle/fluid/operators/fused/resnet_basic_block_op_xpu.cc
@@ -293,7 +293,7 @@ static inline void xpu_conv2d_grad(xpu::Context* ctx,
   PADDLE_ENFORCE_XDNN_SUCCESS(r, "conv2d_grad");
 }
 
-template <typename T>
+template <typename T, typename DeviceContext>
 class ResNetBasicBlockXPUKernel : public framework::OpKernel<T> {
  public:
   using XPUT = typename XPUTypeTrait<T>::Type;
@@ -696,7 +696,7 @@ class ResNetBasicBlockXPUKernel : public framework::OpKernel<T> {
   }
 };
 
-template <typename T>
+template <typename T, typename DeviceContext>
 class ResNetBasicBlockGradXPUKernel : public framework::OpKernel<T> {
  public:
   using XPUT = typename XPUTypeTrait<T>::Type;
@@ -992,8 +992,14 @@
 namespace ops = paddle::operators;
 namespace plat = paddle::platform;
 
-REGISTER_OP_XPU_KERNEL(resnet_basic_block,
-                       ops::ResNetBasicBlockXPUKernel<float>);
-REGISTER_OP_XPU_KERNEL(resnet_basic_block_grad,
-                       ops::ResNetBasicBlockGradXPUKernel<float>);
+PD_REGISTER_STRUCT_KERNEL(resnet_basic_block,
+                          XPU,
+                          ALL_LAYOUT,
+                          ops::ResNetBasicBlockXPUKernel,
+                          float) {}
+PD_REGISTER_STRUCT_KERNEL(resnet_basic_block_grad,
+                          XPU,
+                          ALL_LAYOUT,
+                          ops::ResNetBasicBlockGradXPUKernel,
+                          float) {}
 #endif
diff --git a/paddle/fluid/operators/fused/resnet_unit_op_xpu.cc b/paddle/fluid/operators/fused/resnet_unit_op_xpu.cc
index 1e2741cde5d..1e4ed290f43 100644
--- a/paddle/fluid/operators/fused/resnet_unit_op_xpu.cc
+++ b/paddle/fluid/operators/fused/resnet_unit_op_xpu.cc
@@ -19,7 +19,7 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-template <typename T>
+template <typename T, typename DeviceContext>
 class ResNetUnitXPUKernel : public framework::OpKernel<T> {
   using XPUType = typename XPUTypeTrait<T>::Type;
 
@@ -181,7 +181,7 @@ class ResNetUnitXPUKernel : public framework::OpKernel<T> {
   }
 };
 
-template <typename T>
+template <typename T, typename DeviceContext>
 class ResNetUnitGradXPUKernel : public framework::OpKernel<T> {
   using XPUType = typename XPUTypeTrait<T>::Type;
 
@@ -361,9 +361,15 @@ class ResNetUnitGradXPUKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 namespace plat = paddle::platform;
 
-REGISTER_OP_XPU_KERNEL(resnet_unit,
-                       ops::ResNetUnitXPUKernel<plat::float16>,
-                       ops::ResNetUnitXPUKernel<float>);
-REGISTER_OP_XPU_KERNEL(resnet_unit_grad,
-                       ops::ResNetUnitGradXPUKernel<plat::float16>,
-                       ops::ResNetUnitGradXPUKernel<float>);
+PD_REGISTER_STRUCT_KERNEL(resnet_unit,
+                          XPU,
+                          ALL_LAYOUT,
+                          ops::ResNetUnitXPUKernel,
+                          plat::float16,
+                          float) {}
+PD_REGISTER_STRUCT_KERNEL(resnet_unit_grad,
+                          XPU,
+                          ALL_LAYOUT,
+                          ops::ResNetUnitGradXPUKernel,
+                          plat::float16,
+                          float) {}
diff --git a/paddle/fluid/operators/sampling_id_op_xpu.cc b/paddle/fluid/operators/sampling_id_op_xpu.cc
index 0b720c21381..9fd0193733e 100644
--- a/paddle/fluid/operators/sampling_id_op_xpu.cc
+++ b/paddle/fluid/operators/sampling_id_op_xpu.cc
@@ -16,8 +16,6 @@
 
 #include "paddle/fluid/platform/device_context.h"
 
 namespace ops = paddle::operators;
-using XPUCtx = paddle::platform::XPUDeviceContext;
-REGISTER_OP_XPU_KERNEL(sampling_id,
-                       paddle::operators::SamplingIdKernel<float, XPUCtx>,
-                       paddle::operators::SamplingIdKernel<double, XPUCtx>);
+PD_REGISTER_STRUCT_KERNEL(
+    sampling_id, XPU, ALL_LAYOUT, ops::SamplingIdKernel, float, double) {}
diff --git a/paddle/fluid/operators/sequence_ops/sequence_conv_op_xpu.cc b/paddle/fluid/operators/sequence_ops/sequence_conv_op_xpu.cc
index f7b0b5c3b58..53fb13180c3 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_conv_op_xpu.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_conv_op_xpu.cc
@@ -20,7 +20,7 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-template <typename DeviceContext, typename T>
+template <typename T, typename DeviceContext>
 class SequenceConvXPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
@@ -154,7 +154,7 @@ class SequenceConvXPUKernel : public framework::OpKernel<T> {
   }
 };
 
-template <typename DeviceContext, typename T>
+template <typename T, typename DeviceContext>
 class SequenceConvGradXPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
@@ -363,12 +363,12 @@ class SequenceConvGradXPUKernel : public framework::OpKernel<T> {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP_XPU_KERNEL(
-    sequence_conv,
-    ops::SequenceConvXPUKernel<paddle::platform::XPUDeviceContext, float>);
-
-REGISTER_OP_XPU_KERNEL(
-    sequence_conv_grad,
-    ops::SequenceConvGradXPUKernel<paddle::platform::XPUDeviceContext, float>);
+PD_REGISTER_STRUCT_KERNEL(
+    sequence_conv, XPU, ALL_LAYOUT, ops::SequenceConvXPUKernel, float) {}
+PD_REGISTER_STRUCT_KERNEL(sequence_conv_grad,
+                          XPU,
+                          ALL_LAYOUT,
+                          ops::SequenceConvGradXPUKernel,
+                          float) {}
 
 #endif
diff --git a/paddle/fluid/operators/sequence_ops/sequence_unpad_op_xpu.cc b/paddle/fluid/operators/sequence_ops/sequence_unpad_op_xpu.cc
index cc81ad20cac..c875cdc37e8 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_unpad_op_xpu.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_unpad_op_xpu.cc
@@ -17,7 +17,7 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/sequence_ops/sequence_unpad_op.h"
 
 namespace ops = paddle::operators;
-REGISTER_OP_XPU_KERNEL(sequence_unpad,
-                       ops::SequenceUnpadOpKernel<float, phi::XPUContext>);
+PD_REGISTER_STRUCT_KERNEL(
+    sequence_unpad, XPU, ALL_LAYOUT, ops::SequenceUnpadOpKernel, float) {}
 #endif
diff --git a/paddle/fluid/operators/uniform_random_inplace_op_xpu.cc b/paddle/fluid/operators/uniform_random_inplace_op_xpu.cc
index bf0360ace0b..f1afd8ef3e2 100644
--- a/paddle/fluid/operators/uniform_random_inplace_op_xpu.cc
+++ b/paddle/fluid/operators/uniform_random_inplace_op_xpu.cc
@@ -22,7 +22,7 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-template <typename T>
+template <typename T, typename DeviceContext>
 class XPUUniformRandomInplaceKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
@@ -71,7 +71,7 @@ class XPUUniformRandomInplaceKernel : public framework::OpKernel<T> {
   }
 };
 
-template <typename T>
+template <typename T, typename DeviceContext>
 class XPUUniformRandomInplaceGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext &ctx) const override {
@@ -95,10 +95,15 @@
 }  // namespace operators
 }  // namespace paddle
 
-REGISTER_OP_XPU_KERNEL(uniform_random_inplace,
-                       paddle::operators::XPUUniformRandomInplaceKernel<float>);
-REGISTER_OP_XPU_KERNEL(
-    uniform_random_inplace_grad,
-    paddle::operators::XPUUniformRandomInplaceGradKernel<float>);
+PD_REGISTER_STRUCT_KERNEL(uniform_random_inplace,
+                          XPU,
+                          ALL_LAYOUT,
+                          ops::XPUUniformRandomInplaceKernel,
+                          float) {}
+PD_REGISTER_STRUCT_KERNEL(uniform_random_inplace_grad,
+                          XPU,
+                          ALL_LAYOUT,
+                          ops::XPUUniformRandomInplaceGradKernel,
+                          float) {}
 
 #endif  // PADDLE_WITH_XPU
-- 
GitLab