未验证 提交 a176a07e 编写于 作者: H huangjiyi 提交者: GitHub

Register fluid kernels to phi [part 8] (#53032)

* update

* fix bug
上级 e0c14fdf
...@@ -269,9 +269,11 @@ REGISTER_OPERATOR(sequence_conv_grad, ...@@ -269,9 +269,11 @@ REGISTER_OPERATOR(sequence_conv_grad,
ops::SequenceConvGradOp, ops::SequenceConvGradOp,
ops::SequenceConvGradNoNeedBufferVarsInference); ops::SequenceConvGradNoNeedBufferVarsInference);
REGISTER_OP_CPU_KERNEL(sequence_conv, PD_REGISTER_STRUCT_KERNEL(
ops::SequenceConvKernel<phi::CPUContext, float>, sequence_conv, CPU, ALL_LAYOUT, ops::SequenceConvKernel, float, double) {}
ops::SequenceConvKernel<phi::CPUContext, double>); PD_REGISTER_STRUCT_KERNEL(sequence_conv_grad,
REGISTER_OP_CPU_KERNEL(sequence_conv_grad, CPU,
ops::SequenceConvGradKernel<phi::CPUContext, float>, ALL_LAYOUT,
ops::SequenceConvGradKernel<phi::CPUContext, double>); ops::SequenceConvGradKernel,
float,
double) {}
...@@ -15,9 +15,11 @@ limitations under the License. */ ...@@ -15,9 +15,11 @@ limitations under the License. */
#include "paddle/fluid/operators/sequence_ops/sequence_conv_op.h" #include "paddle/fluid/operators/sequence_ops/sequence_conv_op.h"
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(sequence_conv, PD_REGISTER_STRUCT_KERNEL(
ops::SequenceConvKernel<phi::GPUContext, float>, sequence_conv, GPU, ALL_LAYOUT, ops::SequenceConvKernel, float, double) {}
ops::SequenceConvKernel<phi::GPUContext, double>); PD_REGISTER_STRUCT_KERNEL(sequence_conv_grad,
REGISTER_OP_CUDA_KERNEL(sequence_conv_grad, GPU,
ops::SequenceConvGradKernel<phi::GPUContext, float>, ALL_LAYOUT,
ops::SequenceConvGradKernel<phi::GPUContext, double>); ops::SequenceConvGradKernel,
float,
double) {}
...@@ -22,7 +22,7 @@ limitations under the License. */ ...@@ -22,7 +22,7 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace operators { namespace operators {
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceConvKernel : public framework::OpKernel<T> { class SequenceConvKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
...@@ -85,7 +85,7 @@ class SequenceConvKernel : public framework::OpKernel<T> { ...@@ -85,7 +85,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
} }
}; };
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceConvGradKernel : public framework::OpKernel<T> { class SequenceConvGradKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
......
...@@ -88,6 +88,9 @@ namespace ops = paddle::operators; ...@@ -88,6 +88,9 @@ namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(sequence_enumerate, REGISTER_OP_WITHOUT_GRADIENT(sequence_enumerate,
ops::SequenceEnumerateOp, ops::SequenceEnumerateOp,
ops::SequenceEnumerateOpMaker); ops::SequenceEnumerateOpMaker);
REGISTER_OP_CPU_KERNEL(sequence_enumerate, PD_REGISTER_STRUCT_KERNEL(sequence_enumerate,
ops::SequenceEnumerateKernel<phi::CPUContext, int32_t>, CPU,
ops::SequenceEnumerateKernel<phi::CPUContext, int64_t>); ALL_LAYOUT,
ops::SequenceEnumerateKernel,
int32_t,
int64_t) {}
...@@ -47,7 +47,7 @@ __global__ void CalcOutPut(const T* in_data, ...@@ -47,7 +47,7 @@ __global__ void CalcOutPut(const T* in_data,
} }
} }
template <typename T> template <typename T, typename DeviceContext>
class SequenceEnumerateOpCUDAKernel : public framework::OpKernel<T> { class SequenceEnumerateOpCUDAKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
...@@ -91,7 +91,10 @@ class SequenceEnumerateOpCUDAKernel : public framework::OpKernel<T> { ...@@ -91,7 +91,10 @@ class SequenceEnumerateOpCUDAKernel : public framework::OpKernel<T> {
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
REGISTER_OP_CUDA_KERNEL( namespace ops = paddle::operators;
sequence_enumerate, PD_REGISTER_STRUCT_KERNEL(sequence_enumerate,
paddle::operators::SequenceEnumerateOpCUDAKernel<int32_t>, GPU,
paddle::operators::SequenceEnumerateOpCUDAKernel<int64_t>); ALL_LAYOUT,
ops::SequenceEnumerateOpCUDAKernel,
int32_t,
int64_t) {}
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
namespace paddle { namespace paddle {
namespace operators { namespace operators {
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceEnumerateKernel : public framework::OpKernel<T> { class SequenceEnumerateKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
......
...@@ -98,6 +98,9 @@ namespace ops = paddle::operators; ...@@ -98,6 +98,9 @@ namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(sequence_erase, REGISTER_OP_WITHOUT_GRADIENT(sequence_erase,
ops::SequenceEraseOp, ops::SequenceEraseOp,
ops::SequenceEraseOpMaker); ops::SequenceEraseOpMaker);
REGISTER_OP_CPU_KERNEL(sequence_erase, PD_REGISTER_STRUCT_KERNEL(sequence_erase,
ops::SequenceEraseKernel<phi::CPUContext, int32_t>, CPU,
ops::SequenceEraseKernel<phi::CPUContext, int64_t>); ALL_LAYOUT,
ops::SequenceEraseKernel,
int32_t,
int64_t) {}
...@@ -62,7 +62,7 @@ __global__ void SetOutput(const T* in_dat, ...@@ -62,7 +62,7 @@ __global__ void SetOutput(const T* in_dat,
} }
} }
template <typename T> template <typename T, typename DeviceContext>
class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> { class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
...@@ -129,6 +129,10 @@ class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> { ...@@ -129,6 +129,10 @@ class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
REGISTER_OP_CUDA_KERNEL(sequence_erase, namespace ops = paddle::operators;
paddle::operators::SequenceEraseOpCUDAKernel<int32_t>, PD_REGISTER_STRUCT_KERNEL(sequence_erase,
paddle::operators::SequenceEraseOpCUDAKernel<int64_t>); GPU,
ALL_LAYOUT,
ops::SequenceEraseOpCUDAKernel,
int32_t,
int64_t) {}
...@@ -21,7 +21,7 @@ limitations under the License. */ ...@@ -21,7 +21,7 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace operators { namespace operators {
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceEraseKernel : public framework::OpKernel<T> { class SequenceEraseKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
......
...@@ -209,14 +209,19 @@ REGISTER_OPERATOR( ...@@ -209,14 +209,19 @@ REGISTER_OPERATOR(
REGISTER_OPERATOR(sequence_expand_as_grad, REGISTER_OPERATOR(sequence_expand_as_grad,
ops::SequenceExpandAsOpGrad, ops::SequenceExpandAsOpGrad,
ops::SequenceExpandAsGradOpNoNeedBufferVarsInferer); ops::SequenceExpandAsGradOpNoNeedBufferVarsInferer);
REGISTER_OP_CPU_KERNEL(sequence_expand_as, PD_REGISTER_STRUCT_KERNEL(sequence_expand_as,
ops::SequenceExpandAsKernel<phi::CPUContext, float>, CPU,
ops::SequenceExpandAsKernel<phi::CPUContext, double>, ALL_LAYOUT,
ops::SequenceExpandAsKernel<phi::CPUContext, int>, ops::SequenceExpandAsKernel,
ops::SequenceExpandAsKernel<phi::CPUContext, int64_t>); float,
REGISTER_OP_CPU_KERNEL( double,
sequence_expand_as_grad, int,
ops::SequenceExpandAsGradKernel<phi::CPUContext, float>, int64_t) {}
ops::SequenceExpandAsGradKernel<phi::CPUContext, double>, PD_REGISTER_STRUCT_KERNEL(sequence_expand_as_grad,
ops::SequenceExpandAsGradKernel<phi::CPUContext, int>, CPU,
ops::SequenceExpandAsGradKernel<phi::CPUContext, int64_t>); ALL_LAYOUT,
ops::SequenceExpandAsGradKernel,
float,
double,
int,
int64_t) {}
...@@ -130,14 +130,19 @@ struct SequenceExpandAsGradFunctor<phi::GPUContext, T> { ...@@ -130,14 +130,19 @@ struct SequenceExpandAsGradFunctor<phi::GPUContext, T> {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(sequence_expand_as, PD_REGISTER_STRUCT_KERNEL(sequence_expand_as,
ops::SequenceExpandAsKernel<phi::GPUContext, float>, GPU,
ops::SequenceExpandAsKernel<phi::GPUContext, double>, ALL_LAYOUT,
ops::SequenceExpandAsKernel<phi::GPUContext, int>, ops::SequenceExpandAsKernel,
ops::SequenceExpandAsKernel<phi::GPUContext, int64_t>); float,
REGISTER_OP_CUDA_KERNEL( double,
sequence_expand_as_grad, int,
ops::SequenceExpandAsGradKernel<phi::GPUContext, float>, int64_t) {}
ops::SequenceExpandAsGradKernel<phi::GPUContext, double>, PD_REGISTER_STRUCT_KERNEL(sequence_expand_as_grad,
ops::SequenceExpandAsGradKernel<phi::GPUContext, int>, GPU,
ops::SequenceExpandAsGradKernel<phi::GPUContext, int64_t>); ALL_LAYOUT,
ops::SequenceExpandAsGradKernel,
float,
double,
int,
int64_t) {}
...@@ -67,7 +67,7 @@ struct SequenceExpandAsFunctor<phi::CPUContext, T> { ...@@ -67,7 +67,7 @@ struct SequenceExpandAsFunctor<phi::CPUContext, T> {
} }
}; };
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceExpandAsKernel : public framework::OpKernel<T> { class SequenceExpandAsKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext &context) const override { void Compute(const framework::ExecutionContext &context) const override {
...@@ -144,7 +144,7 @@ struct SequenceExpandAsGradFunctor<phi::CPUContext, T> { ...@@ -144,7 +144,7 @@ struct SequenceExpandAsGradFunctor<phi::CPUContext, T> {
} }
}; };
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceExpandAsGradKernel : public framework::OpKernel<T> { class SequenceExpandAsGradKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext &context) const override { void Compute(const framework::ExecutionContext &context) const override {
......
...@@ -281,13 +281,19 @@ REGISTER_OPERATOR(sequence_expand, ...@@ -281,13 +281,19 @@ REGISTER_OPERATOR(sequence_expand,
REGISTER_OPERATOR(sequence_expand_grad, REGISTER_OPERATOR(sequence_expand_grad,
ops::SequenceExpandOpGrad, ops::SequenceExpandOpGrad,
ops::SequenceExpandGradOpNoNeedBufferVarsInferer); ops::SequenceExpandGradOpNoNeedBufferVarsInferer);
REGISTER_OP_CPU_KERNEL(sequence_expand, PD_REGISTER_STRUCT_KERNEL(sequence_expand,
ops::SequenceExpandKernel<phi::CPUContext, float>, CPU,
ops::SequenceExpandKernel<phi::CPUContext, double>, ALL_LAYOUT,
ops::SequenceExpandKernel<phi::CPUContext, int>, ops::SequenceExpandKernel,
ops::SequenceExpandKernel<phi::CPUContext, int64_t>); float,
REGISTER_OP_CPU_KERNEL(sequence_expand_grad, double,
ops::SequenceExpandGradKernel<phi::CPUContext, float>, int,
ops::SequenceExpandGradKernel<phi::CPUContext, double>, int64_t) {}
ops::SequenceExpandGradKernel<phi::CPUContext, int>, PD_REGISTER_STRUCT_KERNEL(sequence_expand_grad,
ops::SequenceExpandGradKernel<phi::CPUContext, int64_t>); CPU,
ALL_LAYOUT,
ops::SequenceExpandGradKernel,
float,
double,
int,
int64_t) {}
...@@ -227,14 +227,19 @@ struct SequenceExpandGradFunctor<phi::GPUContext, T> { ...@@ -227,14 +227,19 @@ struct SequenceExpandGradFunctor<phi::GPUContext, T> {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(sequence_expand, PD_REGISTER_STRUCT_KERNEL(sequence_expand,
ops::SequenceExpandKernel<phi::GPUContext, float>, GPU,
ops::SequenceExpandKernel<phi::GPUContext, double>, ALL_LAYOUT,
ops::SequenceExpandKernel<phi::GPUContext, int>, ops::SequenceExpandKernel,
ops::SequenceExpandKernel<phi::GPUContext, int64_t>); float,
REGISTER_OP_CUDA_KERNEL( double,
sequence_expand_grad, int,
ops::SequenceExpandGradKernel<phi::GPUContext, float>, int64_t) {}
ops::SequenceExpandGradKernel<phi::GPUContext, double>, PD_REGISTER_STRUCT_KERNEL(sequence_expand_grad,
ops::SequenceExpandGradKernel<phi::GPUContext, int>, GPU,
ops::SequenceExpandGradKernel<phi::GPUContext, int64_t>); ALL_LAYOUT,
ops::SequenceExpandGradKernel,
float,
double,
int,
int64_t) {}
...@@ -81,7 +81,7 @@ struct SequenceExpandFunctor<phi::CPUContext, T> { ...@@ -81,7 +81,7 @@ struct SequenceExpandFunctor<phi::CPUContext, T> {
} }
}; };
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceExpandKernel : public framework::OpKernel<T> { class SequenceExpandKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
...@@ -185,7 +185,7 @@ struct SequenceExpandGradFunctor<phi::CPUContext, T> { ...@@ -185,7 +185,7 @@ struct SequenceExpandGradFunctor<phi::CPUContext, T> {
} }
}; };
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceExpandGradKernel : public framework::OpKernel<T> { class SequenceExpandGradKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
......
...@@ -102,9 +102,12 @@ REGISTER_OPERATOR( ...@@ -102,9 +102,12 @@ REGISTER_OPERATOR(
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>, paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>); paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OP_CPU_KERNEL( namespace ops = paddle::operators;
sequence_mask, PD_REGISTER_STRUCT_KERNEL(sequence_mask,
paddle::operators::SequenceMaskKernel<phi::CPUContext, int>, CPU,
paddle::operators::SequenceMaskKernel<phi::CPUContext, int64_t>, ALL_LAYOUT,
paddle::operators::SequenceMaskKernel<phi::CPUContext, float>, ops::SequenceMaskKernel,
paddle::operators::SequenceMaskKernel<phi::CPUContext, double>); float,
double,
int,
int64_t) {}
...@@ -14,9 +14,12 @@ ...@@ -14,9 +14,12 @@
#include "paddle/fluid/operators/sequence_ops/sequence_mask_op.h" #include "paddle/fluid/operators/sequence_ops/sequence_mask_op.h"
REGISTER_OP_CUDA_KERNEL( namespace ops = paddle::operators;
sequence_mask, PD_REGISTER_STRUCT_KERNEL(sequence_mask,
paddle::operators::SequenceMaskKernel<phi::GPUContext, int>, GPU,
paddle::operators::SequenceMaskKernel<phi::GPUContext, int64_t>, ALL_LAYOUT,
paddle::operators::SequenceMaskKernel<phi::GPUContext, float>, ops::SequenceMaskKernel,
paddle::operators::SequenceMaskKernel<phi::GPUContext, double>); float,
double,
int,
int64_t) {}
...@@ -69,7 +69,7 @@ struct SequenceMaskFunctor { ...@@ -69,7 +69,7 @@ struct SequenceMaskFunctor {
int maxlen_; int maxlen_;
}; };
template <typename DeviceContext, typename Tx> template <typename Tx, typename DeviceContext>
class SequenceMaskKernel : public framework::OpKernel<Tx> { class SequenceMaskKernel : public framework::OpKernel<Tx> {
public: public:
void Compute(const framework::ExecutionContext &ctx) const override { void Compute(const framework::ExecutionContext &ctx) const override {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册