未验证 提交 7a323f78 编写于 作者: H huangjiyi 提交者: GitHub

update (#53033)

上级 a176a07e
......@@ -285,13 +285,20 @@ REGISTER_OPERATOR(sequence_pad,
// NOTE(review): this region is rendered from a commit diff — it shows both the
// legacy fluid registrations (REGISTER_OP_CPU_KERNEL, removed by the change)
// and their phi-style replacements (PD_REGISTER_STRUCT_KERNEL, added) together.
// Register the sequence_pad_grad operator; the NoNeedBufferVars inferer
// presumably marks inputs whose buffers need not be retained for backward —
// confirm against the op definition.
REGISTER_OPERATOR(sequence_pad_grad,
ops::SequencePadGradOp,
ops::SequencePadGradOpNoNeedBufferVarsInferer);
// Legacy registration: one explicit kernel template instantiation per
// (DeviceContext, dtype) pair, with the device context as the first template
// argument.
REGISTER_OP_CPU_KERNEL(sequence_pad,
ops::SequencePadOpKernel<phi::CPUContext, float>,
ops::SequencePadOpKernel<phi::CPUContext, double>,
ops::SequencePadOpKernel<phi::CPUContext, int>,
ops::SequencePadOpKernel<phi::CPUContext, int64_t>);
REGISTER_OP_CPU_KERNEL(sequence_pad_grad,
ops::SequencePadGradOpKernel<phi::CPUContext, float>,
ops::SequencePadGradOpKernel<phi::CPUContext, double>,
ops::SequencePadGradOpKernel<phi::CPUContext, int>,
ops::SequencePadGradOpKernel<phi::CPUContext, int64_t>);
// phi-style registration: op name, backend (CPU), layout (ALL_LAYOUT), the
// kernel class template, then the dtype list. The kernel template now takes
// <T, DeviceContext> (dtype first), matching the reordered template
// parameters in the kernel header elsewhere in this diff.
PD_REGISTER_STRUCT_KERNEL(sequence_pad,
CPU,
ALL_LAYOUT,
ops::SequencePadOpKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_pad_grad,
CPU,
ALL_LAYOUT,
ops::SequencePadGradOpKernel,
float,
double,
int,
int64_t) {}
......@@ -15,13 +15,19 @@ limitations under the License. */
#include "paddle/fluid/operators/sequence_ops/sequence_pad_op.h"
namespace ops = paddle::operators;
// NOTE(review): diff view — legacy CUDA registrations (removed) followed by
// their phi-style replacements (added). Dtype coverage (float, double, int,
// int64_t) is unchanged by the migration.
REGISTER_OP_CUDA_KERNEL(sequence_pad,
ops::SequencePadOpKernel<phi::GPUContext, float>,
ops::SequencePadOpKernel<phi::GPUContext, double>,
ops::SequencePadOpKernel<phi::GPUContext, int>,
ops::SequencePadOpKernel<phi::GPUContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(sequence_pad_grad,
ops::SequencePadGradOpKernel<phi::GPUContext, float>,
ops::SequencePadGradOpKernel<phi::GPUContext, double>,
ops::SequencePadGradOpKernel<phi::GPUContext, int>,
ops::SequencePadGradOpKernel<phi::GPUContext, int64_t>);
// phi-style registration on the GPU backend; the device context is supplied
// by the macro's backend argument rather than an explicit template argument.
PD_REGISTER_STRUCT_KERNEL(sequence_pad,
GPU,
ALL_LAYOUT,
ops::SequencePadOpKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_pad_grad,
GPU,
ALL_LAYOUT,
ops::SequencePadGradOpKernel,
float,
double,
int,
int64_t) {}
......@@ -25,8 +25,7 @@ namespace paddle {
namespace operators {
using LoD = framework::LoD;
template <typename DeviceContext, typename T>
template <typename T, typename DeviceContext>
class SequencePadOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
......@@ -68,7 +67,7 @@ class SequencePadOpKernel : public framework::OpKernel<T> {
}
};
template <typename DeviceContext, typename T>
template <typename T, typename DeviceContext>
class SequencePadGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
......
......@@ -197,6 +197,9 @@ REGISTER_OPERATOR(sequence_pool_grad,
ops::SequencePoolGradOp,
ops::SequencePoolGradOpNoNeedBufferVarsInferer);
// NOTE(review): diff view — legacy CPU registration (removed) and its
// phi-style replacement (added). Both register only float and double for the
// sequence_pool gradient on CPU.
REGISTER_OP_CPU_KERNEL(sequence_pool_grad,
ops::SequencePoolGradKernel<phi::CPUContext, float>,
ops::SequencePoolGradKernel<phi::CPUContext, double>);
PD_REGISTER_STRUCT_KERNEL(sequence_pool_grad,
CPU,
ALL_LAYOUT,
ops::SequencePoolGradKernel,
float,
double) {}
......@@ -14,5 +14,5 @@ limitations under the License. */
#include "paddle/fluid/operators/sequence_ops/sequence_pool_op.h"
namespace ops = paddle::operators;
// NOTE(review): diff view — legacy CUDA registration (removed) and phi-style
// replacement (added). The GPU build registers only float, whereas the CPU
// registration in this same diff also covers double; the asymmetry exists in
// both old and new forms, so it predates this migration.
REGISTER_OP_CUDA_KERNEL(sequence_pool_grad,
ops::SequencePoolGradKernel<phi::GPUContext, float>);
PD_REGISTER_STRUCT_KERNEL(
sequence_pool_grad, GPU, ALL_LAYOUT, ops::SequencePoolGradKernel, float) {}
......@@ -23,7 +23,7 @@ limitations under the License. */
namespace paddle {
namespace operators {
template <typename DeviceContext, typename T>
template <typename T, typename DeviceContext>
class SequencePoolGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
......
......@@ -144,13 +144,19 @@ REGISTER_OPERATOR(sequence_reshape,
ops::SequenceReshapeGradOpMaker<paddle::framework::OpDesc>,
ops::SequenceReshapeGradOpMaker<paddle::imperative::OpBase>);
// NOTE(review): diff view — legacy CPU registrations (removed) followed by
// phi-style replacements (added).
REGISTER_OPERATOR(sequence_reshape_grad, ops::SequenceReshapeGradOp);
REGISTER_OP_CPU_KERNEL(sequence_reshape,
ops::SequenceReshapeKernel<phi::CPUContext, float>,
ops::SequenceReshapeKernel<phi::CPUContext, double>,
ops::SequenceReshapeKernel<phi::CPUContext, int>,
ops::SequenceReshapeKernel<phi::CPUContext, int64_t>);
// Legacy grad registration listed int64_t before int; the dtype SET is what
// matters for dispatch, and the new macro below normalizes the order.
REGISTER_OP_CPU_KERNEL(sequence_reshape_grad,
ops::SequenceReshapeGradKernel<phi::CPUContext, float>,
ops::SequenceReshapeGradKernel<phi::CPUContext, double>,
ops::SequenceReshapeGradKernel<phi::CPUContext, int64_t>,
ops::SequenceReshapeGradKernel<phi::CPUContext, int>);
PD_REGISTER_STRUCT_KERNEL(sequence_reshape,
CPU,
ALL_LAYOUT,
ops::SequenceReshapeKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_reshape_grad,
CPU,
ALL_LAYOUT,
ops::SequenceReshapeGradKernel,
float,
double,
int,
int64_t) {}
......@@ -15,14 +15,19 @@ limitations under the License. */
#include "paddle/fluid/operators/sequence_ops/sequence_reshape_op.h"
namespace ops = paddle::operators;
// NOTE(review): diff view — legacy CUDA registrations (removed) followed by
// phi-style replacements (added); dtype coverage is unchanged.
REGISTER_OP_CUDA_KERNEL(sequence_reshape,
ops::SequenceReshapeKernel<phi::GPUContext, float>,
ops::SequenceReshapeKernel<phi::GPUContext, double>,
ops::SequenceReshapeKernel<phi::GPUContext, int>,
ops::SequenceReshapeKernel<phi::GPUContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
sequence_reshape_grad,
ops::SequenceReshapeGradKernel<phi::GPUContext, float>,
ops::SequenceReshapeGradKernel<phi::GPUContext, double>,
ops::SequenceReshapeGradKernel<phi::GPUContext, int64_t>,
ops::SequenceReshapeGradKernel<phi::GPUContext, int>);
PD_REGISTER_STRUCT_KERNEL(sequence_reshape,
GPU,
ALL_LAYOUT,
ops::SequenceReshapeKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_reshape_grad,
GPU,
ALL_LAYOUT,
ops::SequenceReshapeGradKernel,
float,
double,
int,
int64_t) {}
......@@ -20,7 +20,7 @@ namespace paddle {
namespace operators {
using LoDTensor = phi::DenseTensor;
template <typename DeviceContext, typename T>
template <typename T, typename DeviceContext>
class SequenceReshapeKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
......@@ -85,7 +85,7 @@ class SequenceReshapeKernel : public framework::OpKernel<T> {
}
};
template <typename DeviceContext, typename T>
template <typename T, typename DeviceContext>
class SequenceReshapeGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
......
......@@ -22,9 +22,12 @@ REGISTER_OPERATOR(sequence_reverse,
ops::SequenceReverseGradOpMaker<paddle::framework::OpDesc>,
ops::SequenceReverseGradOpMaker<paddle::imperative::OpBase>);
// NOTE(review): diff view — legacy CPU registration (removed) and phi-style
// replacement (added). Same five dtypes in both; the legacy form listed
// uint8_t first while the new form lists it last — order is cosmetic.
REGISTER_OP_CPU_KERNEL(sequence_reverse,
ops::SequenceReverseOpKernel<phi::CPUContext, uint8_t>,
ops::SequenceReverseOpKernel<phi::CPUContext, int>,
ops::SequenceReverseOpKernel<phi::CPUContext, int64_t>,
ops::SequenceReverseOpKernel<phi::CPUContext, float>,
ops::SequenceReverseOpKernel<phi::CPUContext, double>);
PD_REGISTER_STRUCT_KERNEL(sequence_reverse,
CPU,
ALL_LAYOUT,
ops::SequenceReverseOpKernel,
float,
double,
int,
int64_t,
uint8_t) {}
......@@ -15,10 +15,12 @@
#include "paddle/fluid/operators/sequence_ops/sequence_reverse_op.h"
namespace ops = paddle::operators;
// NOTE(review): diff view — legacy CUDA registration (removed) and phi-style
// replacement (added); dtype set (uint8_t, int, int64_t, float, double) is
// identical to the CPU registration.
REGISTER_OP_CUDA_KERNEL(sequence_reverse,
ops::SequenceReverseOpKernel<phi::GPUContext, uint8_t>,
ops::SequenceReverseOpKernel<phi::GPUContext, int>,
ops::SequenceReverseOpKernel<phi::GPUContext, int64_t>,
ops::SequenceReverseOpKernel<phi::GPUContext, float>,
ops::SequenceReverseOpKernel<phi::GPUContext, double>);
PD_REGISTER_STRUCT_KERNEL(sequence_reverse,
GPU,
ALL_LAYOUT,
ops::SequenceReverseOpKernel,
float,
double,
int,
int64_t,
uint8_t) {}
......@@ -111,7 +111,7 @@ struct SequenceReverseFunctor {
size_t row_numel_;
};
template <typename DeviceContext, typename T>
template <typename T, typename DeviceContext>
class SequenceReverseOpKernel : public framework::OpKernel<T> {
using LoDTensor = phi::DenseTensor;
......
......@@ -186,13 +186,19 @@ REGISTER_OPERATOR(sequence_scatter,
// NOTE(review): diff view — legacy CPU registrations (removed) followed by
// phi-style replacements (added).
REGISTER_OPERATOR(sequence_scatter_grad,
ops::SequenceScatterGradOp,
ops::SequenceScatterGradNoNeedBufferVarsInferer);
// Legacy sequence_scatter kernels were templated on T only (no
// DeviceContext); the kernel class templates elsewhere in this diff gain a
// DeviceContext parameter so they fit the PD_REGISTER_STRUCT_KERNEL form.
REGISTER_OP_CPU_KERNEL(sequence_scatter,
ops::SequenceScatterOpKernel<float>,
ops::SequenceScatterOpKernel<double>,
ops::SequenceScatterOpKernel<int>,
ops::SequenceScatterOpKernel<int64_t>);
REGISTER_OP_CPU_KERNEL(sequence_scatter_grad,
ops::SequenceScatterGradientOpKernel<float>,
ops::SequenceScatterGradientOpKernel<double>,
ops::SequenceScatterGradientOpKernel<int>,
ops::SequenceScatterGradientOpKernel<int64_t>);
PD_REGISTER_STRUCT_KERNEL(sequence_scatter,
CPU,
ALL_LAYOUT,
ops::SequenceScatterOpKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_scatter_grad,
CPU,
ALL_LAYOUT,
ops::SequenceScatterGradientOpKernel,
float,
double,
int,
int64_t) {}
......@@ -23,7 +23,7 @@ namespace operators {
using Tensor = phi::DenseTensor;
using LoDTensor = phi::DenseTensor;
template <typename T>
template <typename T, typename DeviceContext>
class SequenceScatterOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
......@@ -86,7 +86,7 @@ class SequenceScatterOpKernel : public framework::OpKernel<T> {
}
};
template <typename T>
template <typename T, typename DeviceContext>
class SequenceScatterGradientOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
......
......@@ -158,14 +158,19 @@ REGISTER_OPERATOR(sequence_slice,
// NOTE(review): diff view — legacy CPU registrations (removed) followed by
// phi-style replacements (added).
// Register the sequence_slice_grad operator; the NoNeedBufferVars inferer
// presumably marks inputs whose buffers need not be retained for backward —
// confirm against the op definition.
REGISTER_OPERATOR(sequence_slice_grad,
ops::SequenceSliceGradOp,
ops::SequenceSliceGradNoNeedBufferVarsInferer);
REGISTER_OP_CPU_KERNEL(sequence_slice,
ops::SequenceSliceOpKernel<phi::CPUContext, float>,
ops::SequenceSliceOpKernel<phi::CPUContext, double>,
ops::SequenceSliceOpKernel<phi::CPUContext, int>,
ops::SequenceSliceOpKernel<phi::CPUContext, int64_t>);
REGISTER_OP_CPU_KERNEL(
sequence_slice_grad,
ops::SequenceSliceGradOpKernel<phi::CPUContext, float>,
ops::SequenceSliceGradOpKernel<phi::CPUContext, double>,
ops::SequenceSliceGradOpKernel<phi::CPUContext, int>,
ops::SequenceSliceGradOpKernel<phi::CPUContext, int64_t>);
PD_REGISTER_STRUCT_KERNEL(sequence_slice,
CPU,
ALL_LAYOUT,
ops::SequenceSliceOpKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_slice_grad,
CPU,
ALL_LAYOUT,
ops::SequenceSliceGradOpKernel,
float,
double,
int,
int64_t) {}
......@@ -15,14 +15,19 @@ limitations under the License. */
#include "paddle/fluid/operators/sequence_ops/sequence_slice_op.h"
namespace ops = paddle::operators;
// NOTE(review): diff view — legacy CUDA registrations (removed) followed by
// phi-style replacements (added); dtype coverage is unchanged.
REGISTER_OP_CUDA_KERNEL(sequence_slice,
ops::SequenceSliceOpKernel<phi::GPUContext, float>,
ops::SequenceSliceOpKernel<phi::GPUContext, double>,
ops::SequenceSliceOpKernel<phi::GPUContext, int>,
ops::SequenceSliceOpKernel<phi::GPUContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
sequence_slice_grad,
ops::SequenceSliceGradOpKernel<phi::GPUContext, float>,
ops::SequenceSliceGradOpKernel<phi::GPUContext, double>,
ops::SequenceSliceGradOpKernel<phi::GPUContext, int>,
ops::SequenceSliceGradOpKernel<phi::GPUContext, int64_t>);
PD_REGISTER_STRUCT_KERNEL(sequence_slice,
GPU,
ALL_LAYOUT,
ops::SequenceSliceOpKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_STRUCT_KERNEL(sequence_slice_grad,
GPU,
ALL_LAYOUT,
ops::SequenceSliceGradOpKernel,
float,
double,
int,
int64_t) {}
......@@ -40,7 +40,7 @@ inline LoD SequenceSliceLoD(const T& in,
return out_lod;
}
template <typename DeviceContext, typename T>
template <typename T, typename DeviceContext>
class SequenceSliceOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
......@@ -151,7 +151,7 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> {
}
};
template <typename DeviceContext, typename T>
template <typename T, typename DeviceContext>
class SequenceSliceGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册