diff --git a/paddle/fluid/operators/fused/yolo_box_head_op.cu b/paddle/fluid/operators/fused/yolo_box_head_op.cu
index 88d589f85b0ec6e502151e7a0b1daad78b7af77f..abb7b5aeaae00f31ce9e30d6db12cd85244d2814 100644
--- a/paddle/fluid/operators/fused/yolo_box_head_op.cu
+++ b/paddle/fluid/operators/fused/yolo_box_head_op.cu
@@ -63,7 +63,7 @@ __global__ void YoloBoxHeadCudaKernel(const T* input,
   }
 }
 
-template <typename T>
+template <typename T, typename DeviceContext>
 class YoloBoxHeadKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
@@ -103,4 +103,5 @@ class YoloBoxHeadKernel : public framework::OpKernel<T> {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP_CUDA_KERNEL(yolo_box_head, ops::YoloBoxHeadKernel<float>);
+PD_REGISTER_STRUCT_KERNEL(
+    yolo_box_head, GPU, ALL_LAYOUT, ops::YoloBoxHeadKernel, float) {}
diff --git a/paddle/fluid/operators/fused/yolo_box_post_op.cu b/paddle/fluid/operators/fused/yolo_box_post_op.cu
index fc01d7027f31d0621fea436b392ff951bc5b5da9..72bb97a2aae9ee879ccbaa43a3636430e1d12f77 100644
--- a/paddle/fluid/operators/fused/yolo_box_post_op.cu
+++ b/paddle/fluid/operators/fused/yolo_box_post_op.cu
@@ -315,7 +315,7 @@ static void YoloTensorParseCuda(
       prob_thresh);
 }
 
-template <typename T>
+template <typename T, typename DeviceContext>
 class YoloBoxPostKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
@@ -555,4 +555,5 @@ class YoloBoxPostKernel : public framework::OpKernel<T> {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP_CUDA_KERNEL(yolo_box_post, ops::YoloBoxPostKernel<float>);
+PD_REGISTER_STRUCT_KERNEL(
+    yolo_box_post, GPU, ALL_LAYOUT, ops::YoloBoxPostKernel, float) {}
diff --git a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc
index bdbef1a7eee72340c7c940bada1b84c0fc6d644a..8acaad0f60eaeb7ac0fa9259aacab39b13518875 100644
--- a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc
+++ b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc
@@ -56,7 +56,7 @@ inline void UniformRealDistribution(paddle::platform::bfloat16 *data,
 // It seems that Eigen::Tensor::random in GPU will SEGFAULT.
 // Use std::random and thrust::random(thrust is a std library in CUDA) to
 // implement uniform random.
-template <typename T>
+template <typename T, typename DeviceContext>
 class CPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
@@ -178,16 +178,20 @@ with random values sampled from a uniform distribution.
 }  // namespace operators
 }  // namespace paddle
 
+namespace ops = paddle::operators;
+namespace plat = paddle::platform;
 REGISTER_OPERATOR(
     uniform_random_batch_size_like,
-    paddle::operators::UniformRandomBatchSizeLikeOp,
-    paddle::operators::UniformRandomBatchSizeLikeOpMaker,
+    ops::UniformRandomBatchSizeLikeOp,
+    ops::UniformRandomBatchSizeLikeOpMaker,
     paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
     paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
-    paddle::operators::BatchSizeLikeNoNeedBufferVarsInferer);
-
-REGISTER_OP_CPU_KERNEL(
-    uniform_random_batch_size_like,
-    paddle::operators::CPUUniformRandomKernel<float>,
-    paddle::operators::CPUUniformRandomKernel<double>,
-    paddle::operators::CPUUniformRandomKernel<paddle::platform::bfloat16>);
+    ops::BatchSizeLikeNoNeedBufferVarsInferer);
+
+PD_REGISTER_STRUCT_KERNEL(uniform_random_batch_size_like,
+                          CPU,
+                          ALL_LAYOUT,
+                          ops::CPUUniformRandomKernel,
+                          float,
+                          double,
+                          plat::bfloat16) {}
diff --git a/paddle/fluid/operators/uniform_random_batch_size_like_op.cu b/paddle/fluid/operators/uniform_random_batch_size_like_op.cu
index 4c60cb76fb9ea03ff9f066928b4313578a655ee0..1bbd6eba3c662e0881966105a1bbbaa66e33e9a2 100644
--- a/paddle/fluid/operators/uniform_random_batch_size_like_op.cu
+++ b/paddle/fluid/operators/uniform_random_batch_size_like_op.cu
@@ -16,7 +16,7 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-template <typename T>
+template <typename T, typename DeviceContext>
 class GPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
@@ -59,6 +59,10 @@ class GPUUniformRandomKernel : public framework::OpKernel<T> {
 }  // namespace operators
 }  // namespace paddle
 
-REGISTER_OP_CUDA_KERNEL(uniform_random_batch_size_like,
-                        paddle::operators::GPUUniformRandomKernel<float>,
-                        paddle::operators::GPUUniformRandomKernel<double>);
+namespace ops = paddle::operators;
+PD_REGISTER_STRUCT_KERNEL(uniform_random_batch_size_like,
+                          GPU,
+                          ALL_LAYOUT,
+                          ops::GPUUniformRandomKernel,
+                          float,
+                          double) {}
diff --git a/paddle/fluid/operators/unzip_op.cc b/paddle/fluid/operators/unzip_op.cc
index ffb46c2f4b56c1816428443c3f8ff8fdac856e77..556b345c17b0aa180917ebedc70958c2f59e94ff 100644
--- a/paddle/fluid/operators/unzip_op.cc
+++ b/paddle/fluid/operators/unzip_op.cc
@@ -162,10 +162,7 @@ REGISTER_OPERATOR(unzip,
 
 REGISTER_OPERATOR(unzip_grad, ops::unzipGradientOp);
 
-REGISTER_OP_CPU_KERNEL(unzip,
-                       ops::unzipOpKernel,
-                       ops::unzipOpKernel);
-
-REGISTER_OP_CPU_KERNEL(unzip_grad,
-                       ops::unzipGradOpKernel,
-                       ops::unzipGradOpKernel);
+PD_REGISTER_STRUCT_KERNEL(unzip, CPU, ALL_LAYOUT, ops::unzipOpKernel, int64_t) {
+}
+PD_REGISTER_STRUCT_KERNEL(
+    unzip_grad, CPU, ALL_LAYOUT, ops::unzipGradOpKernel, int64_t) {}
diff --git a/paddle/fluid/operators/unzip_op.cu b/paddle/fluid/operators/unzip_op.cu
index 0605ce4ab91191aa9a1f80805092eaac68de1e23..d60af556cd279c786ca7562946eebba275b7da9b 100644
--- a/paddle/fluid/operators/unzip_op.cu
+++ b/paddle/fluid/operators/unzip_op.cu
@@ -42,7 +42,7 @@ __global__ void unzipKernel(
   }
 }
 
-template
+template <typename T, typename DeviceContext>
 class unzipCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
@@ -69,7 +69,7 @@ class unzipCUDAKernel : public framework::OpKernel<T> {
   }
 };
 
-template
+template <typename T, typename DeviceContext>
 class unzipGradCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
@@ -81,25 +81,24 @@ class unzipGradCUDAKernel : public framework::OpKernel<T> {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP_CUDA_KERNEL(
-    unzip,
-    ops::unzipCUDAKernel,
-    ops::unzipCUDAKernel,
-    ops::unzipCUDAKernel,
-    ops::unzipCUDAKernel,
-    ops::unzipCUDAKernel,
-    ops::unzipCUDAKernel,
-    ops::unzipCUDAKernel,
-    ops::unzipCUDAKernel,
-    ops::unzipCUDAKernel,
-    ops::unzipCUDAKernel,
-    ops::unzipCUDAKernel,
-    ops::unzipCUDAKernel);
-
-REGISTER_OP_CUDA_KERNEL(unzip_grad,
-                        ops::unzipGradCUDAKernel,
-                        ops::unzipGradCUDAKernel,
-                        ops::unzipGradCUDAKernel,
-                        ops::unzipGradCUDAKernel,
-                        ops::unzipGradCUDAKernel,
-                        ops::unzipGradCUDAKernel);
+namespace plat = paddle::platform;
+PD_REGISTER_STRUCT_KERNEL(unzip,
+                          GPU,
+                          ALL_LAYOUT,
+                          ops::unzipCUDAKernel,
+                          float,
+                          double,
+                          plat::float16,
+                          bool,
+                          int,
+                          int64_t) {}
+PD_REGISTER_STRUCT_KERNEL(unzip_grad,
+                          GPU,
+                          ALL_LAYOUT,
+                          ops::unzipGradCUDAKernel,
+                          float,
+                          double,
+                          plat::float16,
+                          bool,
+                          int,
+                          int64_t) {}
diff --git a/paddle/fluid/operators/unzip_op.h b/paddle/fluid/operators/unzip_op.h
index f177f69476f1ed623a7941b0683f20c536ff2f52..6829d00dccf563d4d0801489956637e387f1ef6b 100644
--- a/paddle/fluid/operators/unzip_op.h
+++ b/paddle/fluid/operators/unzip_op.h
@@ -19,7 +19,7 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-template
+template <typename T, typename DeviceContext>
 class unzipOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
@@ -27,7 +27,7 @@ class unzipOpKernel : public framework::OpKernel<T> {
   }
 };
 
-template
+template <typename T, typename DeviceContext>
 class unzipGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
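
For reviewers unfamiliar with the new macro, every file above converges on the same pattern: the kernel class is templated on the data type first and the device context second, and a single `PD_REGISTER_STRUCT_KERNEL(op, backend, layout, kernel, dtypes...)` call replaces the old per-device `REGISTER_OP_CPU_KERNEL` / `REGISTER_OP_CUDA_KERNEL` calls. The sketch below is only an illustration of that shape, not code from this PR; `my_op`, `MyOpKernel`, and the `Compute` body are made-up placeholders, and the include is the one the touched files already rely on.

```cpp
// Minimal sketch of the struct-kernel pattern (illustrative names, not from this PR).
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

// PD_REGISTER_STRUCT_KERNEL instantiates the class as Kernel<dtype, DeviceContext>,
// so the data type must be the first template parameter.
template <typename T, typename DeviceContext>
class MyOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out = ctx.Output<phi::DenseTensor>("Out");
    out->mutable_data<T>(ctx.GetPlace());
    // ... actual kernel body ...
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
// One call per backend; each listed dtype becomes one registered kernel instance.
// The trailing {} is required because the macro opens a function definition.
PD_REGISTER_STRUCT_KERNEL(my_op, CPU, ALL_LAYOUT, ops::MyOpKernel, float, double) {}
```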
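If a migrated kernel needs the device context (for example a CUDA stream), the new second template parameter can be used through the execution context. Again a hedged illustration of the existing fluid API rather than code from the diff:

```cpp
// Illustrative only: pulling the typed device context inside Compute().
template <typename T, typename DeviceContext>
class ExampleKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    // Resolves to phi::CPUContext or phi::GPUContext depending on the backend
    // the kernel was registered for in PD_REGISTER_STRUCT_KERNEL.
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    (void)dev_ctx;  // e.g. dev_ctx.stream() for a CUDA kernel launch on GPU
  }
};
```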