未验证 提交 d9edb233 编写于 作者: H huangjiyi 提交者: GitHub

Register fluid kernels to phi [part 13] (#53037)

* update

* fix bug

* update

* fix bug
上级 7a323f78
......@@ -63,7 +63,7 @@ __global__ void YoloBoxHeadCudaKernel(const T* input,
}
}
template <typename T>
template <typename T, typename DeviceContext>
class YoloBoxHeadKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
......@@ -103,4 +103,5 @@ class YoloBoxHeadKernel : public framework::OpKernel<T> {
} // namespace paddle
namespace ops = paddle::operators;
// NOTE(review): diff residue — both the removed and the added registration
// lines are present in this scrape. The legacy fluid-style registration
// below appears to be the line deleted by this commit:
REGISTER_OP_CUDA_KERNEL(yolo_box_head, ops::YoloBoxHeadKernel<float>);
// ...and this is its phi-style replacement. Arguments as written:
// op name, backend (GPU), layout (ALL_LAYOUT), kernel class, dtype list.
PD_REGISTER_STRUCT_KERNEL(
yolo_box_head, GPU, ALL_LAYOUT, ops::YoloBoxHeadKernel, float) {}
......@@ -315,7 +315,7 @@ static void YoloTensorParseCuda(
prob_thresh);
}
template <typename T>
template <typename T, typename DeviceContext>
class YoloBoxPostKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
......@@ -555,4 +555,5 @@ class YoloBoxPostKernel : public framework::OpKernel<T> {
} // namespace paddle
namespace ops = paddle::operators;
// NOTE(review): diff residue — the legacy fluid registration below appears
// to be the line removed by this commit:
REGISTER_OP_CUDA_KERNEL(yolo_box_post, ops::YoloBoxPostKernel<float>);
// ...replaced by the phi-style macro: op name, backend (GPU),
// layout (ALL_LAYOUT), kernel class, dtype list.
PD_REGISTER_STRUCT_KERNEL(
yolo_box_post, GPU, ALL_LAYOUT, ops::YoloBoxPostKernel, float) {}
......@@ -56,7 +56,7 @@ inline void UniformRealDistribution(paddle::platform::bfloat16 *data,
// It seems that Eigen::Tensor::random in GPU will SEGFAULT.
// Use std::random and thrust::random(thrust is a std library in CUDA) to
// implement uniform random.
template <typename T>
template <typename T, typename DeviceContext>
class CPUUniformRandomKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
......@@ -178,16 +178,20 @@ with random values sampled from a uniform distribution.
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
// NOTE(review): diff residue — removed and added lines are interleaved in
// this scrape. REGISTER_OPERATOR shows each operand twice: once with the
// fully qualified paddle::operators:: spelling (old lines) and once via the
// ops:: alias (new lines); only one spelling exists in the real file.
REGISTER_OPERATOR(
uniform_random_batch_size_like,
paddle::operators::UniformRandomBatchSizeLikeOp,
paddle::operators::UniformRandomBatchSizeLikeOpMaker,
ops::UniformRandomBatchSizeLikeOp,
ops::UniformRandomBatchSizeLikeOpMaker,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
paddle::operators::BatchSizeLikeNoNeedBufferVarsInferer);
// Legacy fluid CPU registration (appears removed by this commit):
REGISTER_OP_CPU_KERNEL(
uniform_random_batch_size_like,
paddle::operators::CPUUniformRandomKernel<float>,
paddle::operators::CPUUniformRandomKernel<double>,
paddle::operators::CPUUniformRandomKernel<paddle::platform::bfloat16>);
ops::BatchSizeLikeNoNeedBufferVarsInferer);
// phi-style replacement registering the same dtype list
// (float, double, bfloat16) on the CPU backend:
PD_REGISTER_STRUCT_KERNEL(uniform_random_batch_size_like,
CPU,
ALL_LAYOUT,
ops::CPUUniformRandomKernel,
float,
double,
plat::bfloat16) {}
......@@ -16,7 +16,7 @@ limitations under the License. */
namespace paddle {
namespace operators {
template <typename T>
template <typename T, typename DeviceContext>
class GPUUniformRandomKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
......@@ -59,6 +59,10 @@ class GPUUniformRandomKernel : public framework::OpKernel<T> {
} // namespace operators
} // namespace paddle
// NOTE(review): diff residue — the legacy fluid CUDA registration below
// appears to be the code removed by this commit:
REGISTER_OP_CUDA_KERNEL(uniform_random_batch_size_like,
paddle::operators::GPUUniformRandomKernel<float>,
paddle::operators::GPUUniformRandomKernel<double>);
namespace ops = paddle::operators;
// ...replaced by the phi-style GPU registration for the same dtypes
// (float, double):
PD_REGISTER_STRUCT_KERNEL(uniform_random_batch_size_like,
GPU,
ALL_LAYOUT,
ops::GPUUniformRandomKernel,
float,
double) {}
......@@ -162,10 +162,7 @@ REGISTER_OPERATOR(unzip,
REGISTER_OPERATOR(unzip_grad, ops::unzipGradientOp);
// NOTE(review): the removed fluid registrations below each list the same
// instantiation twice (unzipOpKernel<int64_t> / unzipGradOpKernel<int64_t>
// repeated) — a pre-existing duplicate; the second entry was presumably
// meant to be a different dtype. The commit deletes these lines anyway.
REGISTER_OP_CPU_KERNEL(unzip,
ops::unzipOpKernel<int64_t>,
ops::unzipOpKernel<int64_t>);
REGISTER_OP_CPU_KERNEL(unzip_grad,
ops::unzipGradOpKernel<int64_t>,
ops::unzipGradOpKernel<int64_t>);
// phi-style replacements registering int64_t only on CPU:
PD_REGISTER_STRUCT_KERNEL(unzip, CPU, ALL_LAYOUT, ops::unzipOpKernel, int64_t) {
}
PD_REGISTER_STRUCT_KERNEL(
unzip_grad, CPU, ALL_LAYOUT, ops::unzipGradOpKernel, int64_t) {}
......@@ -42,7 +42,7 @@ __global__ void unzipKernel(
}
}
template <typename T, typename LodType>
template <typename T, typename DeviceContext, typename LodType = int64_t>
class unzipCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
......@@ -69,7 +69,7 @@ class unzipCUDAKernel : public framework::OpKernel<T> {
}
};
template <typename T>
template <typename T, typename DeviceContext>
class unzipGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
......@@ -81,25 +81,24 @@ class unzipGradCUDAKernel : public framework::OpKernel<T> {
} // namespace paddle
namespace ops = paddle::operators;
// NOTE(review): diff residue — the two REGISTER_OP_CUDA_KERNEL calls below
// appear to be the legacy fluid registrations removed by this commit;
// the PD_REGISTER_STRUCT_KERNEL calls after them are the phi replacements.
// Legacy: unzip was instantiated over every (dtype, LodType) pair with
// LodType in {int, int64_t}.
REGISTER_OP_CUDA_KERNEL(
unzip,
ops::unzipCUDAKernel<float, int>,
ops::unzipCUDAKernel<double, int>,
ops::unzipCUDAKernel<paddle::platform::float16, int>,
ops::unzipCUDAKernel<int, int>,
ops::unzipCUDAKernel<bool, int>,
ops::unzipCUDAKernel<int64_t, int>,
ops::unzipCUDAKernel<float, int64_t>,
ops::unzipCUDAKernel<double, int64_t>,
ops::unzipCUDAKernel<paddle::platform::float16, int64_t>,
ops::unzipCUDAKernel<int, int64_t>,
ops::unzipCUDAKernel<bool, int64_t>,
ops::unzipCUDAKernel<int64_t, int64_t>);
REGISTER_OP_CUDA_KERNEL(unzip_grad,
ops::unzipGradCUDAKernel<float>,
ops::unzipGradCUDAKernel<double>,
ops::unzipGradCUDAKernel<paddle::platform::float16>,
ops::unzipGradCUDAKernel<int>,
ops::unzipGradCUDAKernel<bool>,
ops::unzipGradCUDAKernel<int64_t>);
namespace plat = paddle::platform;
// phi-style replacement: only the dtype list is spelled out. The <.., int>
// LodType instantiations are dropped — LodType presumably now comes from the
// kernel template's default argument (int64_t per the template change shown
// earlier in this diff) — TODO confirm against the applied file.
PD_REGISTER_STRUCT_KERNEL(unzip,
GPU,
ALL_LAYOUT,
ops::unzipCUDAKernel,
float,
double,
plat::float16,
bool,
int,
int64_t) {}
PD_REGISTER_STRUCT_KERNEL(unzip_grad,
GPU,
ALL_LAYOUT,
ops::unzipGradCUDAKernel,
float,
double,
plat::float16,
bool,
int,
int64_t) {}
......@@ -19,7 +19,7 @@ limitations under the License. */
namespace paddle {
namespace operators {
template <typename T>
template <typename T, typename DeviceContext>
class unzipOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
......@@ -27,7 +27,7 @@ class unzipOpKernel : public framework::OpKernel<T> {
}
};
template <typename T>
template <typename T, typename DeviceContext>
class unzipGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册