未验证 提交 7a323f78 编写于 作者: H huangjiyi 提交者: GitHub

update (#53033)

上级 a176a07e
...@@ -285,13 +285,20 @@ REGISTER_OPERATOR(sequence_pad, ...@@ -285,13 +285,20 @@ REGISTER_OPERATOR(sequence_pad,
REGISTER_OPERATOR(sequence_pad_grad,
                  ops::SequencePadGradOp,
                  ops::SequencePadGradOpNoNeedBufferVarsInferer);

// CPU registrations via the struct-kernel macro; the trailing type list
// enumerates the element types T instantiated for ops::SequencePadOpKernel.
PD_REGISTER_STRUCT_KERNEL(sequence_pad,
                          CPU,
                          ALL_LAYOUT,
                          ops::SequencePadOpKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_pad_grad,
                          CPU,
                          ALL_LAYOUT,
                          ops::SequencePadGradOpKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
...@@ -15,13 +15,19 @@ limitations under the License. */ ...@@ -15,13 +15,19 @@ limitations under the License. */
#include "paddle/fluid/operators/sequence_ops/sequence_pad_op.h"

namespace ops = paddle::operators;

// GPU registrations for the sequence_pad forward and gradient kernels,
// covering the same element types as the CPU registrations.
PD_REGISTER_STRUCT_KERNEL(sequence_pad,
                          GPU,
                          ALL_LAYOUT,
                          ops::SequencePadOpKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_pad_grad,
                          GPU,
                          ALL_LAYOUT,
                          ops::SequencePadGradOpKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
...@@ -25,8 +25,7 @@ namespace paddle { ...@@ -25,8 +25,7 @@ namespace paddle {
namespace operators { namespace operators {
using LoD = framework::LoD; using LoD = framework::LoD;
template <typename T, typename DeviceContext>
template <typename DeviceContext, typename T>
class SequencePadOpKernel : public framework::OpKernel<T> { class SequencePadOpKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
...@@ -68,7 +67,7 @@ class SequencePadOpKernel : public framework::OpKernel<T> { ...@@ -68,7 +67,7 @@ class SequencePadOpKernel : public framework::OpKernel<T> {
} }
}; };
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequencePadGradOpKernel : public framework::OpKernel<T> { class SequencePadGradOpKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
......
...@@ -197,6 +197,9 @@ REGISTER_OPERATOR(sequence_pool_grad, ...@@ -197,6 +197,9 @@ REGISTER_OPERATOR(sequence_pool_grad,
ops::SequencePoolGradOp, ops::SequencePoolGradOp,
ops::SequencePoolGradOpNoNeedBufferVarsInferer); ops::SequencePoolGradOpNoNeedBufferVarsInferer);
// CPU gradient kernel for sequence_pool; only floating-point element types
// are registered (matching the forward kernel's supported dtypes).
PD_REGISTER_STRUCT_KERNEL(sequence_pool_grad,
                          CPU,
                          ALL_LAYOUT,
                          ops::SequencePoolGradKernel,
                          float,
                          double) {}
...@@ -14,5 +14,5 @@ limitations under the License. */ ...@@ -14,5 +14,5 @@ limitations under the License. */
#include "paddle/fluid/operators/sequence_ops/sequence_pool_op.h"

namespace ops = paddle::operators;

// GPU gradient kernel for sequence_pool; float is the only type registered
// here (narrower than the CPU registration, which also covers double).
PD_REGISTER_STRUCT_KERNEL(
    sequence_pool_grad, GPU, ALL_LAYOUT, ops::SequencePoolGradKernel, float) {}
...@@ -23,7 +23,7 @@ limitations under the License. */ ...@@ -23,7 +23,7 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace operators { namespace operators {
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequencePoolGradKernel : public framework::OpKernel<T> { class SequencePoolGradKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
......
...@@ -144,13 +144,19 @@ REGISTER_OPERATOR(sequence_reshape, ...@@ -144,13 +144,19 @@ REGISTER_OPERATOR(sequence_reshape,
ops::SequenceReshapeGradOpMaker<paddle::framework::OpDesc>, ops::SequenceReshapeGradOpMaker<paddle::framework::OpDesc>,
ops::SequenceReshapeGradOpMaker<paddle::imperative::OpBase>); ops::SequenceReshapeGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(sequence_reshape_grad, ops::SequenceReshapeGradOp);

// CPU registrations for sequence_reshape forward and gradient kernels.
PD_REGISTER_STRUCT_KERNEL(sequence_reshape,
                          CPU,
                          ALL_LAYOUT,
                          ops::SequenceReshapeKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_reshape_grad,
                          CPU,
                          ALL_LAYOUT,
                          ops::SequenceReshapeGradKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
...@@ -15,14 +15,19 @@ limitations under the License. */ ...@@ -15,14 +15,19 @@ limitations under the License. */
#include "paddle/fluid/operators/sequence_ops/sequence_reshape_op.h"

namespace ops = paddle::operators;

// GPU registrations for sequence_reshape forward and gradient kernels,
// mirroring the CPU type list.
PD_REGISTER_STRUCT_KERNEL(sequence_reshape,
                          GPU,
                          ALL_LAYOUT,
                          ops::SequenceReshapeKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_reshape_grad,
                          GPU,
                          ALL_LAYOUT,
                          ops::SequenceReshapeGradKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
...@@ -20,7 +20,7 @@ namespace paddle { ...@@ -20,7 +20,7 @@ namespace paddle {
namespace operators { namespace operators {
using LoDTensor = phi::DenseTensor; using LoDTensor = phi::DenseTensor;
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceReshapeKernel : public framework::OpKernel<T> { class SequenceReshapeKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
...@@ -85,7 +85,7 @@ class SequenceReshapeKernel : public framework::OpKernel<T> { ...@@ -85,7 +85,7 @@ class SequenceReshapeKernel : public framework::OpKernel<T> {
} }
}; };
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceReshapeGradKernel : public framework::OpKernel<T> { class SequenceReshapeGradKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
......
...@@ -22,9 +22,12 @@ REGISTER_OPERATOR(sequence_reverse, ...@@ -22,9 +22,12 @@ REGISTER_OPERATOR(sequence_reverse,
ops::SequenceReverseGradOpMaker<paddle::framework::OpDesc>, ops::SequenceReverseGradOpMaker<paddle::framework::OpDesc>,
ops::SequenceReverseGradOpMaker<paddle::imperative::OpBase>); ops::SequenceReverseGradOpMaker<paddle::imperative::OpBase>);
// CPU registration for sequence_reverse; note uint8_t is included in
// addition to the usual numeric types.
PD_REGISTER_STRUCT_KERNEL(sequence_reverse,
                          CPU,
                          ALL_LAYOUT,
                          ops::SequenceReverseOpKernel,
                          float,
                          double,
                          int,
                          int64_t,
                          uint8_t) {}
...@@ -15,10 +15,12 @@ ...@@ -15,10 +15,12 @@
#include "paddle/fluid/operators/sequence_ops/sequence_reverse_op.h"

namespace ops = paddle::operators;

// GPU registration for sequence_reverse with the same type list as CPU
// (float, double, int, int64_t, uint8_t).
PD_REGISTER_STRUCT_KERNEL(sequence_reverse,
                          GPU,
                          ALL_LAYOUT,
                          ops::SequenceReverseOpKernel,
                          float,
                          double,
                          int,
                          int64_t,
                          uint8_t) {}
...@@ -111,7 +111,7 @@ struct SequenceReverseFunctor { ...@@ -111,7 +111,7 @@ struct SequenceReverseFunctor {
size_t row_numel_; size_t row_numel_;
}; };
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceReverseOpKernel : public framework::OpKernel<T> { class SequenceReverseOpKernel : public framework::OpKernel<T> {
using LoDTensor = phi::DenseTensor; using LoDTensor = phi::DenseTensor;
......
...@@ -186,13 +186,19 @@ REGISTER_OPERATOR(sequence_scatter, ...@@ -186,13 +186,19 @@ REGISTER_OPERATOR(sequence_scatter,
REGISTER_OPERATOR(sequence_scatter_grad,
                  ops::SequenceScatterGradOp,
                  ops::SequenceScatterGradNoNeedBufferVarsInferer);

// CPU registrations for sequence_scatter forward and gradient kernels.
PD_REGISTER_STRUCT_KERNEL(sequence_scatter,
                          CPU,
                          ALL_LAYOUT,
                          ops::SequenceScatterOpKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_scatter_grad,
                          CPU,
                          ALL_LAYOUT,
                          ops::SequenceScatterGradientOpKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
...@@ -23,7 +23,7 @@ namespace operators { ...@@ -23,7 +23,7 @@ namespace operators {
using Tensor = phi::DenseTensor; using Tensor = phi::DenseTensor;
using LoDTensor = phi::DenseTensor; using LoDTensor = phi::DenseTensor;
template <typename T> template <typename T, typename DeviceContext>
class SequenceScatterOpKernel : public framework::OpKernel<T> { class SequenceScatterOpKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
...@@ -86,7 +86,7 @@ class SequenceScatterOpKernel : public framework::OpKernel<T> { ...@@ -86,7 +86,7 @@ class SequenceScatterOpKernel : public framework::OpKernel<T> {
} }
}; };
template <typename T> template <typename T, typename DeviceContext>
class SequenceScatterGradientOpKernel : public framework::OpKernel<T> { class SequenceScatterGradientOpKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
......
...@@ -158,14 +158,19 @@ REGISTER_OPERATOR(sequence_slice, ...@@ -158,14 +158,19 @@ REGISTER_OPERATOR(sequence_slice,
REGISTER_OPERATOR(sequence_slice_grad,
                  ops::SequenceSliceGradOp,
                  ops::SequenceSliceGradNoNeedBufferVarsInferer);

// CPU registrations for sequence_slice forward and gradient kernels.
PD_REGISTER_STRUCT_KERNEL(sequence_slice,
                          CPU,
                          ALL_LAYOUT,
                          ops::SequenceSliceOpKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_slice_grad,
                          CPU,
                          ALL_LAYOUT,
                          ops::SequenceSliceGradOpKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
...@@ -15,14 +15,19 @@ limitations under the License. */ ...@@ -15,14 +15,19 @@ limitations under the License. */
#include "paddle/fluid/operators/sequence_ops/sequence_slice_op.h"

namespace ops = paddle::operators;

// GPU registrations for sequence_slice forward and gradient kernels,
// mirroring the CPU type list.
PD_REGISTER_STRUCT_KERNEL(sequence_slice,
                          GPU,
                          ALL_LAYOUT,
                          ops::SequenceSliceOpKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_slice_grad,
                          GPU,
                          ALL_LAYOUT,
                          ops::SequenceSliceGradOpKernel,
                          float,
                          double,
                          int,
                          int64_t) {}
...@@ -40,7 +40,7 @@ inline LoD SequenceSliceLoD(const T& in, ...@@ -40,7 +40,7 @@ inline LoD SequenceSliceLoD(const T& in,
return out_lod; return out_lod;
} }
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceSliceOpKernel : public framework::OpKernel<T> { class SequenceSliceOpKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
...@@ -151,7 +151,7 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> { ...@@ -151,7 +151,7 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> {
} }
}; };
template <typename DeviceContext, typename T> template <typename T, typename DeviceContext>
class SequenceSliceGradOpKernel : public framework::OpKernel<T> { class SequenceSliceGradOpKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册